diff --git a/.dir-locals.el b/.dir-locals.el deleted file mode 100644 index 45fc8cf..0000000 --- a/.dir-locals.el +++ /dev/null @@ -1,205 +0,0 @@ -( - (org-mode - (eval - . - (progn - (set (make-local-variable 'ssh-user) - ;; "pair") - user-login-name) - ;; might be nice to set this as a global property in the org file - (set (make-local-variable 'ssh-host) - "sharing.io") - (set (make-local-variable 'ssh-user-host) - (concat ssh-user "@" ssh-host)) - - (set (make-local-variable 'item-str) - "(nth 4 (org-heading-components))") - (set (make-local-variable 'user-buffer) - (concat user-login-name "." (file-name-base load-file-name))) - (set (make-local-variable 'tmate-sh) - (concat "/tmp/" user-buffer ".target.sh")) - (set (make-local-variable 'socket) - (concat "/tmp/" user-buffer ".target.iisocket")) - (set (make-local-variable 'socket-param) - (concat ":sockets " socket)) - (set (make-local-variable 'sql-sqlite-program) - (executable-find "sqlite3")) - (set (make-local-variable 'select-enable-clipboard) t) - (set (make-local-variable 'select-enable-primary) t) - (set (make-local-variable 'sql-connection-alist) - (list - (list 'raiinbow - (list 'sql-product '(quote sqlite)) - (list 'sql-database "raiinbow.sqlite") - ) - (list 'apisnoop - (list 'sql-product '(quote postgres)) - (list 'sql-user "apisnoop") - (list 'sql-database "apisnoop") - (list 'sql-port (+ (* (user-uid) 10) 1)) - (list 'sql-server "localhost") - ;; (list 'sql-user user-login-name) - ;;(list 'sql-database user-login-name) - ;; (list 'sql-port (concat (number-to-string (user-uid)) "1")) - ) - )) - (set (make-local-variable 'start-tmate-command) - (concat - "tmate -S " - socket - " new-session -A -s " - user-login-name - " -n main " - "\"tmate wait tmate-ready " - "&& TMATE_CONNECT=\\$(" - "tmate display -p '#{tmate_ssh} # " - user-buffer - ".target # " - ;; would like this to be shorter - (concat - (format-time-string "%Y-%m-%d %T") - (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) 
(substring $x 3 5))) (format-time-string "%z"))) - " # #{tmate_web} ') " - "; echo \\$TMATE_CONNECT " - "; (echo \\$TMATE_CONNECT | xclip -i -sel p -f | xclip -i -sel c ) 2>/dev/null " - "; echo Share the above with your friends and hit enter when done. " - "; read " - "; bash --login\"" - ) - ) - ;; at some point we can bring back working on remote hosts - (set (make-local-variable 'start-tmate-over-ssh-command) - (concat - "tmate -S " - socket - " new-session -A -s " - user-login-name - " -n main " - "\"tmate wait tmate-ready " - "\\&\\& TMATE_CONNECT=\\$\\(" - "tmate display -p '#{tmate_ssh} # " - user-buffer - ".target # " - (concat - (format-time-string "%Y-%m-%d %T") - (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) - " #{tmate_web} '\\) " - "; echo \\$TMATE_CONNECT " - "; \\(echo \\$TMATE_CONNECT \\| xclip -i -sel p -f \\| xclip -i -sel c \\) 2>/dev/null " - "; echo Share the above with your friends and hit enter when done. 
" - "; read " - "; bash --login\"" - ) - ) - (set (make-local-variable 'org-file-properties) - (list - (cons 'header-args:tmate - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval never-export" - " :results silent " - " :session (concat user-login-name \":main\" )" - ;; " :session (concat user-login-name \":\" " item-str ")" - " :socket " socket - " :window " user-login-name - " :terminal sakura" - " :exports code" - ;; If you want each tmate command to run from a particular directory - ;; " :prologue (concat \"cd \" ssh-dir \"\n\")" - ;; " :prologue (concat "cd " org-file-dir "\n") )) - )) - (cons 'header-args:sql-mode - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval never-export" - " :results code" - " :product postgres" - " :session data" - ;; " :session (symbol-value user-login-name)" - ;; " :session (concat user-login-name \":\" " "main" ")" - ;; " :session (concat user-login-name \":\" " item-str ")" - " :exports both" - )) - (cons 'header-args:emacs-lisp - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval never-export" - " :results code" - " :exports both" - )) - (cons 'header-args:elisp - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval never-export" - " :results code" - " :exports both" - )) - (cons 'header-args:bash - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval never-export" - " :results output code verbatis replace" - " :exports both" - " :wrap EXAMPLE" - ;; This can help catch stderr and other issues - ;; " :prologue \"exec 2>&1\n\"" - ;; " :epilogue \":\n\"" - ;; " :prologue exec 2>&1\n(\n" - ;; " :epilogue )\n:\n" - ;; If you want commands executing over tramp - ;; " :dir (symbol-value 'tmpdir)" - ;; " :dir (concat \"ssh:\" ssh-user \"@\" ssh-host \":~\"" - ;; " :dir (concat \"ssh:\" ssh-user \"@\" ssh-host \":~\"" - ;; If you want to feed an application via HEREDOC - ;; :PROPERTIES: - ;; " :prologue 
exec 2>&1\nbq query -n 2000 --nouse_legacy_sql < + + + + + +
+ + + + +#+end_src + +In the above, you would have ~example.cast~ in the same root folder as this ~index.html~, and it combined with the css and js would create an embedded screencast in the #cast div. You could then style it further as you see fit. + +** asciinema to svg +There is also a cast-to-svg tool you can use so that you can embed the recording without needing additional CSS and javascript. + +https://github.com/marionebl/svg-term-cli + +this tool takes a cast file as input and outputs a named svg. Then you'd end up with html like so: + +#+begin_src html + + + + + + + + +#+end_src + +the longer the session, the bigger the file, and so if we are meaning to do incredibly long sessions, or having multiple casts on a page, this +may not be the best option, but it could be useful for including asciinema output into an org file's exported html. + +For example, you could have an org file with a tmux block of + +#+begin_example +docker run busybox/helloworld +#+end_example + +and then below it as "results" you'd see the terminal session of /just/ this +command, showing docker pulling the file and printing out hello. + +* Capturing keyboard input. + +You can capture your key input during an asciicinema session by passing in the --stdin flag at the start of the session. +The resulting file will include a data map with the unicode characters pressed. You can then add some additional javascript to +translate that unicode to their correct symbols and then paste it somewhere on that embedded webpage. Abby does a great +breakdown of that, with a demo site, in the repo [[https://github.com/abdabTheCreator/Asciinema-keydisplay/blob/main/index.html][abdabthecreator/asciinema-keydisplay]] . + +In the demo site, the keys display above the embedded cast. Overlaying the keys onto the cast, in semi-transparent font, is just a +matter of some CSS. Essentially, you'd put both in a wrapper div, and then give the

tag showing the inputs an absolute positioning. + +* How does this fit into the doom config? +To be honest, I am not sure yet! From the start of the ticket, it seemed like we were wanting to incorporate asciinema into tmux blocks. When we run the blocks, we do it inside an asciinema session. This would be handy for saving the resulting work as a playback video of sorts in the end. + +However, there aren't really keypresses we need to capture here. All the work is being piped in from tmux. Some of the examples seemed to imply we'd open up emacs in the tmux session, that is connected to another emacs client, and then use that parent client to pipe in keypresses via tmux to the second client. This seems unnecessarily convoluted, and I'm not sure the use case. + +If we are trying to show a playback of the tmux command being run in some final html, then it feels like we'd want to wrap each tmux command in an asciicinema invocation, with the resulting file name and path being passed in from the src block. We could then just put an img tag beneath each block that has that name, using some elisp triggers and such. Is that what we are trying to achieve here? + +As it relates to the doom-config, I imagine we'd likely figure out the exact elisp we'd want to run and then add that to our existing ob-tmux library as an additional option to pass in. This seems the cleanest way. + +* Capturing key presses in an emacs org file + +The other intention I can see here is to capture our keypresses in the org file itself. We want to show how you can navigate from code block to code block and hit ctrl-c ctrl-c to trigger that codeblock. The idea being that we can create cooperation templates for learning labs, that come with our emacs and some file you are meant to read through and execute the scripts as you approach them. In this case, the person isn't trying to use emacs, emacs is more of an executable document viewer. 
+ +If that is the case, I think it'd be better to record a screencast separate from the org file, since it is intended to show the keypresses for any org file. We could then link to that screencast in our documentation. Alternately, it is possible to present elisp functions as a link. So we could have a link above each code block that, when clicked, runs the command "execute this code block". The person reading through the document only needs to know how to scroll and click. + +At that point, though, it might be more useful to think about a rendered website that has these links as buttons that pass the info to the emacs server which then runs the command. Then, when someone is starting up their lab, they just navigate to a given page, read the instructions, click where they're meant to, and see the output in a tmux window open on another tab. This is approaching Katacoda and other online labs connected to VM's...which is maybe what we want? + +In either case, this seems like work that would happen outside the doom-config, and wouldn't require recording keyboard presses in tmux blocks. + +* Current thoughts +At the moment, I would like to discuss what our hoped-for goal is, and where that work is best done. The ticket is open in doom-config, but this may not be where the work is needed? diff --git a/blog/README.md b/blog/README.md new file mode 100644 index 0000000..0d50fb4 --- /dev/null +++ b/blog/README.md @@ -0,0 +1,94 @@ +# Our Blog + +This directory holds the source and content for our site/blog, located at https://ii.coop +It is built with [hugo](https://gohugo.io), a static site generator. + +## Running the site locally + +If you are editing pages or styles, it is recommended to run the site locally to see the results of your changes before you push them. 
+ +To do this, first install hugo following their [installation instructions](https://gohugo.io/getting-started/installing) + +If installed correctly, you can run this command: + +``` sh +hugo version +``` + +which should return something similar to: + +``` sh +hugo v0.81.0+extended darwin/amd64 BuildDate=unknown +``` + +Then clone this repo and navigate to the blog folder + +``` sh +git clone git@github.com:ii/org && cd org/blog +``` + +from here, run the hugo server + +``` sh +hugo server +``` + +this will start the site up, viewable at http://localhost:1313 + +## Running the site out of a Pair instance + +Install dependencies: + +``` +sudo apt update +sudo apt install -y hugo +``` + +Serve site: + +``` +hugo serve -b https://hugo.$SHARINGIO_PAIR_BASE_DNS_NAME/ --appendPort=false --bind 0.0.0.0 -p 443 +``` + +## Key Directory Structure for Contributors +In this directory, the main folders you are concerned about are `content` and `static` +- content :: holds all the text copy for the site, written in markdown +- static :: holds all assets (like images) and our custom CSS + +## The Content Directory +The content maps to the pages of the site. So the file located at + +`content/blog/deplying-talos-to-equinix.md` + +can be seen online at + +https://ii.coop/blog/deplying-talos-to-equinix + +Similarly `content/about.md` maps to https://ii.coop/about + +## Referencing images in posts + +You can reference images using the [hugo-flavoured markdown syntax](https://learn.netlify.app/en/cont/markdown/) + +the syntax is: + +``` md +![alt text](image_path "title text") +``` +(alt text is what will appear if the image doesn't load, and is used for accessibility. title text is what shows upon hovering over the image. It is optional) + +Everything in our `static` folder gets put into the root of the website. 
So if I placed DOG_PICTURE.png at `static/images/DOG_PICTURE.png`, I would include it in my blog post with: + +``` markdown +![picture of a dog](/images/DOG_PICTURE.png) +``` + +_Note that we start with a slash, and don't include /static/_ + +Alternatively, you can reference a picture from the web, like so: + +``` markdown +![picture of a cat](https://catpics.com/fluffy-cat.png) +``` + +` diff --git a/blog/archetypes/default.md b/blog/archetypes/default.md new file mode 100644 index 0000000..00e77bd --- /dev/null +++ b/blog/archetypes/default.md @@ -0,0 +1,6 @@ +--- +title: "{{ replace .Name "-" " " | title }}" +date: {{ .Date }} +draft: true +--- + diff --git a/blog/blog.org b/blog/blog.org new file mode 100644 index 0000000..ea6cab9 --- /dev/null +++ b/blog/blog.org @@ -0,0 +1,272 @@ +#+HUGO_BASE_DIR: ./ +#+HUGO_SECTION: ./blog +#+HUGO_WEIGHT: auto +#+HUGO_AUTO_SET_LASTMOD: t +* Welcome to the blog scroll! Wanna contribute? Read this first! +This org file contains all the blog posts published to [[https://blog.ii.coop]]. +** About the Blog +Our blog is made with [[https://gohugo.io][hugo]], a static site generator. All necessary code and content for the site are contained in this ~/blog~ directory. +Hugo parses the markdown files contained in [[file:content/][/content]] , with all our blog posts held in [[file:content/posts/][/content/posts]]. + +NOTE: While hugo can also parse org, we felt it best to use the syntax it is designed for. + +We generate our markdown with [[file:~/humacs/doom-emacs/modules/lang/org/README.org::*Module Flags][org-export]]. This is done intentionally from within this file. After exporting a blog article, you can commit and push your changes to see it go live after a minute or two at blog.ii.coop. + +** Adding a new blog post +This file is organized by categories, like [[Guides]]. 
To add a new blog post, create a newline beneath a category heading, then type ~ + +During the MTN project, Bob shipped servers to Hippie's home in Portland, +Oregon, to help with the network-booting work. When Hippie later moved to +Aotearoa New Zealand, he was unable to bring the servers with them. Hippie then +asked Bob if he had any money available to help relocate the servers to Aotearoa +New Zealand. Bob introduced Hippie to someone who could help. This person was +[Dan Kohn](https://en.wikipedia.org/wiki/Dan_Kohn). Bob introduced the two of +them, telling Dan about Hippie's work and values. A relationship was formed, the +servers were relocated, and are still used as part of the infrastructure for +Hippie's vision. + +## Balena and Cross-Cloud + +Now in Aotearoa New Zealand, Hippie began doing work for [Balena](https://www.balena.io/) +with [Denver Williams](https://github.com/denverwilliams). This technology +enabled the "image pull and networking boot" in new, exciting ways. Inspired, +Hippie set to use this technology with Kubernetes, by pulling down the images +and installing [GitLab](https://about.gitlab.com) on top of it. He showed a demo +of this to Dan who, impressed by this innovative approach to using Kubernetes, +asked Hippie if he'd like to join the CNCF to help demonstrate what the +organization is capable of. Hippie jumped in, helping to create a [Demo of CNCF +technologies](https://github.com/cncf/demo/). + +While the demo did not get much traction, it was foundational in exploring +ideas which evolved into Hippie's next project with CNCF. Dan had a vision to +get all cloud providers actively engaged in the cloud-native experience. This +vision manifested as [Cross-Cloud](https://github.com/crosscloudci/cross-cloud), +a way to concretely show the work of CNCF through a web +frontend ([cncf.ci](https://cncf.ci/)) that showed all the projects available on +participating cloud providers. 
[Taylor Carpenter](https://github.com/taylor), a +long time friend, continued the project with the [vulk.coop](https://vulk.coop) +team and Hippie moved on to tackle new challenges with Dan. + +## The start of Conformance + +In 2017 Hippie attended his first CloudNativeCon + KubeCon Europe in Berlin, +Germany. It was here that Dan introduced the Kubernetes conformance standard, +and the Kubernetes Certified Service Provider program. + +A year later, in February 2018 [Kenichi Omichi](https://github.com/oomichi) dug +into the Kubernetes logs and found that Kubernetes had 481 API endpoints, only 53 +of them were covered by tests. Dan knew that for the [Certified Kubernetes +brand](https://github.com/cncf/k8s-conformance/pulls#certified-kubernetes) to +have meaning, they needed to invest to make sure test coverage was much higher. +The problem was not just a lack of tests, but a lack of visibility--it required +heavy, manual data mining just to calculate coverage in the first place. + +To find a solution to this problem, Hippie paired with [Rohan +Fletcher](https://github.com/rohfle). During a discussion with Hippie and Rohan, +Dan showed them a disk usage graph for OSX and proposed they use a similar graph +to visualize Conformance coverage of the Kubernetes API. + +![disk_graph](/images/blog_image/disk_graph.png "The actual screenshot Dan +shared to show his vision") The actual screenshot Dan shared to show his vision + +## APISnoop + +Rohan started on the project to create what would be +[APISnoop](https://apisnoop.cncf.io/): a visual insight into Kubernetes test +coverage. This project was well-received by the community when it was introduced +at Kubecon Europe 2018. + +That same year, [Zach Mandeville](https://github.com/zachmandeville) joined the +team and took over development of APISnoop. At that point there was no real +automation for generating Snoop's data. Much was still done by manually looking +for tests in audit event logs. 
Zach did a lot of the writing, rewriting, and +architectural changes to APISnoop, along with driving updates of underlying +Kubernetes logging so that clear coverage information could be distinguished +from noise. + +While APISnoop worked to show gaps in Conformance coverage, a separate effort +was started to fill those gaps with tests. Unfortunately, this effort was slow +going. After about a year, APISnoop showed very little movement on the graph, as +the test writing efforts yielded very few results. In character, Dan +started to look for other ways to get an increase in coverage happening at a +rate that satisfied his vision. + +Since Hippie and his team had been looking at the Kubernetes API and all its +underlying parts for almost a year, it was a logical fit for them to step up to +the test-writing efforts, and work to increase the test writing velocity. + +## Test-Writing + +The writing of tests was a learning experience for everyone in ii, as well as +the contributors in the Kubernetes community. Initially, the process was to +quietly work on tests, ensuring they fit all known requirements and then, once +they seemed ready, to share it with the community through a PR. These PR's +invariably sparked feedback and needed revisions, and could lead to discussions +on whether the particular test meaningfully changed the coverage at all. The +process for writing, rewriting, and collaborating on tests was slower than +desired. + +Hippie and the ii team-- notably Devan Carpenter, [Caleb +Woodbine](https://github.com/bobymcbobs), and [Stephen +Heywood](https://github.com/heyste)-- came up with the mock test concept, using +org-mode in their own flavor of [Emacs (Humacs)](http://humacs.org/) +deployed on a Kubernetes cluster. Zach decoupled the APISnoop database from the +app, so that it could be deployed to the cluster and used as part of a +test-writing environment. 
This allowed for tighter feedback loops, with test +writers able to immediately see whether their mock test hit the endpoints they +expected. These mock tests, along with their projected results as calculated by +APISnoop, were then presented at the SIG Architecture Conformance sub-project +meetings for initial approval, before creating the actual tests and pull +requests. + +The Kubernetes project is a complex organism, with a vast community and diverse +[Special Interest Groups](https://github.com/kubernetes/community) (SIG’s). +Conformance works across all these organizational levels, and reaching consensus +on it is an equally complex task. The collaborative test-writing method of +Hippie’s team helped increase the velocity and transparency of test writing. + + + + +## Iterations and Automations + +Dan was always pushing the bar higher, continually expecting better and clearer +results out of APISnoop so it would be a well-used tool throughout the +Kubernetes community. This inspired Zach and ii to continually improve its +functionality, changing APISnoop from a static page showing only the current +status of conformance, to a multi-faceted tool with multiple ways to view not +just the current data, but the historical progress of Conformance. Hippie +developed a unique Kind+APISnoop combination that allowed anyone to access the +querying power of APISnoop locally. + +![1_15Cover](/images/blog_image/1_15Cover.png "1.15 Cover") Test Cover 1.15 + +![1_21Cover](/images/blog_image/1_21cover.png "1.21 Cover") Test cover 1.21 + +![alt_text](/images/blog_image/Conformance-progress.png "conformance-progress") +Conformance Progress graph + +The increased conformance coverage was great news, and added the expected value +to the Certified Kubernetes brand. At the same time, it was making life +increasingly difficult for [Taylor Waggoner](https://github.com/taylorwaggoner) +at the CNCF to manage the Conformance Certification process. 
All pull requests +for certification had to be manually verified to ensure it contained all tests, +but the list of conformance tests kept growing with each release. + +Dan approached Hippie and requested the process to be automated, and in 2020 the +CNCF CI Bot was created by [Berno Kleinhans ](https://github.com/bernokl)and +[Rob Kielty](https://github.com/RobertKielty). This allowed for the automatic +checking and labeling of conformance pull requests, speeding up the process and +reducing the human effort needed. + +## Thank you + +The Kubernetes Conformance journey up to this day is an eventful one, built +around relationships and community cooperation, with many different contributors +playing their part to help move this forward. All of this was made possible by +the extraordinary vision and leadership of a friend dear to everyone in the +Kubernetes community: Dan Kohn. + +![Dan_Kohn](/images/blog_image/dan_kohn.jpg "Dan Kohn") diff --git a/blog/content/blog/creating-an-e2e-test-for-conformance.md b/blog/content/blog/creating-an-e2e-test-for-conformance.md new file mode 100644 index 0000000..e5affbc --- /dev/null +++ b/blog/content/blog/creating-an-e2e-test-for-conformance.md @@ -0,0 +1,220 @@ ++++ +title = "Creating an e2e test for Conformance" +author = ["Stephen Heywood"] +date = 2021-05-11 +lastmod = 2021-05-13T09:02:00+13:00 +categories = ["kubernetes"] +draft = false +summary = "Finding untested stable endpoints and creating an e2e test for conformance." ++++ + +## Introduction + +Since the 1.19 release of Kubernetes, the gap in e2e conformance tested endpoints has decreased due in part to the processes and tooling that the team at [ii.coop](https://ii.coop/) have developed. + +![img](/images/2021/apisnoop-progress.png) + +The process starts by using [APIsnoop](https://github.com/cncf/apisnoop) (which uses a postgres database containing audit logs from e2e test runs) to identify a set of untested endpoints that are part of the stable API endpoints. 
During this process various groups or patterns of endpoints are discovered. One such group of endpoints are “DaemonSetStatus”. Next we will explore these endpoints, create an e2e test that exercises each of them, then merge this test into the k8s repo. + +APIsnoop results for untested “DaemonSetStatus” endpoints in [untested_stable_endpoints table](https://github.com/cncf/apisnoop/blob/main/apps/snoopdb/tables-views-functions.org#untested_stable_endpoints) + +```sql + select + endpoint, + path, + kind + from testing.untested_stable_endpoint + where eligible is true + and endpoint ilike '%DaemonSetStatus' + order by kind, endpoint desc; +``` + + +``` + endpoint | path | kind +------------------------------------------+---------------------------------------------------------------+------------ + replaceAppsV1NamespacedDaemonSetStatus | /apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status | DaemonSet + readAppsV1NamespacedDaemonSetStatus | /apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status | DaemonSet + patchAppsV1NamespacedDaemonSetStatus | /apis/apps/v1/namespaces/{namespace}/daemonsets/{name}/status | DaemonSet + (3 rows) +``` + + +# Connecting an endpoint to a resource + +Here are three possible ways use to connect an API endpoint to a resource in a cluster + +1. Some initial details about the endpoint can be found via the [API Reference](https://kubernetes.io/docs/reference/kubernetes-api/). For this example about Daemonset we can locate [read](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#get-read-status-of-the-specified-daemonset), [patch](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#patch-partially-update-status-of-the-specified-daemonset) and [replace](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#update-replace-status-of-the-specified-daemonset) for Daemonset Status. + +2. 
`kubectl` has an option to describe the fields associated with each supported API resource. The following example shows how it can provide details around ’status conditions’. + + ``` + $ kubectl explain daemonset.status.conditions + KIND: DaemonSet + VERSION: apps/v1 + + RESOURCE: conditions <[]Object> + + DESCRIPTION: + Represents the latest available observations of a DaemonSet's current + state. + + DaemonSetCondition describes the state of a DaemonSet at a certain point. + + FIELDS: + lastTransitionTime + Last time the condition transitioned from one status to another. + + message + A human readable message indicating details about the transition. + + reason + The reason for the condition's last transition. + + status -required- + Status of the condition, one of True, False, Unknown. + + type -required- + Type of DaemonSet condition. + ``` + +3. Lastly, using both [APIsnoop in cluster](https://github.com/cncf/apisnoop/tree/main/kind) while reviewing the current [e2e test suite](https://github.com/kubernetes/kubernetes/tree/master/test/e2e) for existing conformance tests that test a similar set of endpoints. In this case we used [a Service Status test](https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/test/e2e/network/service.go#L2300-L2392) as a template for the new Daemonset test. 
+ + ```sql + with latest_release as ( + select release::semver as release + from open_api + order by release::semver desc + limit 1 + ) + + select ec.endpoint, ec.path, ec.kind + from endpoint_coverage ec + join latest_release on(ec.release::semver = latest_release.release) + where level = 'stable' + and ec.endpoint ilike '%NamespacedServiceStatus' + and tested is true + ORDER BY endpoint desc; + ``` + + + ``` + endpoint | path | kind + --------------------------------------+-------------------------------------------------------+--------- + replaceCoreV1NamespacedServiceStatus | /api/v1/namespaces/{namespace}/services/{name}/status | Service + readCoreV1NamespacedServiceStatus | /api/v1/namespaces/{namespace}/services/{name}/status | Service + patchCoreV1NamespacedServiceStatus | /api/v1/namespaces/{namespace}/services/{name}/status | Service + (3 rows) + ``` + + The Service status e2e test followed similar ideas and patterns from [/test/e2e/auth/certificates.go](https://github.com/kubernetes/kubernetes/blob/31030820be979ea0b2c39e08eb18fddd71f353ed/test/e2e/auth/certificates.go#L356-L383) and [/test/e2e/network/ingress.go](https://github.com/kubernetes/kubernetes/blob/31030820be979ea0b2c39e08eb18fddd71f353ed/test/e2e/network/ingress.go#L1091-L1127) + +# Writing an e2e test + +## Initial Exploration + +Using [literate programming](https://wiki.c2.com/?LiterateProgramming) we created [Appsv1DaemonSetStatusLifecycleTest.org](https://github.com/apisnoop/ticket-writing/blob/create-daemonset-status-lifecycle-test/Appsv1DaemonSetStatusLifecycleTest.org) +(via [pair](https://github.com/sharingio/pair)) to both test and document the explorations of the endpoints. This provides a clear outline that should be easily replicated and validated by others as needed. +Once completed, the document is converted into markdown which becomes a GitHub [issue](https://github.com/kubernetes/kubernetes/issues/100437). 
+ +The issue provides the following before a PR is opened: +- a starting point to discuss the endpoints +- the approach taken to test them +- whether they are [eligible for conformance](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md#conformance-test-requirements). + +## Creating the e2e test + +Utilizing the above document, the test is structured in to four parts; + +1. Creating the resources for the test, in this case a DaemonSet and a ’watch’. + +2. Testing the first endpoint, `readAppsV1NamespacedReplicaSetStatus` via a [dynamic client](https://github.com/ii/kubernetes/blob/ca3aa6f5af1b545b116b52c717b866e43c79079b/test/e2e/apps/daemon_set.go#L841). This is due to the standard go client not being able to access the sub-resource. We also make sure there are no errors from either getting or decoding the response. + +3. The next endpoint tested is `replaceAppsV1NamespacedDaemonSetStatus` which replaces all status conditions at the same time. As the resource version of the DaemonSet may change before the new status conditions are updated we may need to [retry the request if there is a conflict](https://github.com/ii/kubernetes/blob/ca3aa6f5af1b545b116b52c717b866e43c79079b/test/e2e/apps/daemon_set.go#L854). Monitoring the watch events for the Daemonset we can confirm that the status conditions have been [replaced](https://github.com/ii/kubernetes/blob/ca3aa6f5af1b545b116b52c717b866e43c79079b/test/e2e/apps/daemon_set.go#L884-L886). + +4. The last endpoint tested is `patchAppsV1NamespacedReplicaSetStatus` which only patches a [single condition](https://github.com/ii/kubernetes/blob/ca3aa6f5af1b545b116b52c717b866e43c79079b/test/e2e/apps/daemon_set.go#L906) this time. Again, using the watch to monitor for events we can check that the single condition [has been updated](https://github.com/ii/kubernetes/blob/ca3aa6f5af1b545b116b52c717b866e43c79079b/test/e2e/apps/daemon_set.go#L931). 
+ +## Validating the e2e test + +Using `go test` we can run a single test for quick feedback + +```bash +cd ~/go/src/k8s.io/kubernetes +TEST_NAME="should verify changes to a daemon set status" +go test ./test/e2e/ -v -timeout=0 --report-dir=/tmp/ARTIFACTS -ginkgo.focus="$TEST_NAME" +``` + +Checking the e2e test logs we see that everything looks to be okay. + +``` +[It] should verify changes to a daemon set status /home/ii/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:812 +STEP: Creating simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. +May 10 17:36:36.106: INFO: Number of nodes with available pods: 0 +May 10 17:36:36.106: INFO: Node heyste-control-plane-fkjmr is running more than one daemon pod +May 10 17:36:37.123: INFO: Number of nodes with available pods: 0 +May 10 17:36:37.123: INFO: Node heyste-control-plane-fkjmr is running more than one daemon pod +May 10 17:36:38.129: INFO: Number of nodes with available pods: 0 +May 10 17:36:38.129: INFO: Node heyste-control-plane-fkjmr is running more than one daemon pod +May 10 17:36:39.122: INFO: Number of nodes with available pods: 1 +May 10 17:36:39.122: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Getting /status +May 10 17:36:39.142: INFO: Daemon Set daemon-set has Conditions: [] +STEP: updating the DaemonSet Status +May 10 17:36:39.160: INFO: updatedStatus.Conditions: []v1.DaemonSetCondition{v1.DaemonSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:v1.Time{Time:time.Ti +me{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the daemon set status to be updated +May 10 17:36:39.163: INFO: Observed event: ADDED +May 10 17:36:39.163: INFO: Observed event: MODIFIED +May 10 17:36:39.163: INFO: Observed event: MODIFIED +May 10 17:36:39.164: INFO: Observed event: MODIFIED +May 10 17:36:39.164: INFO: Found daemon set daemon-set in namespace daemonsets-2986 with labels: 
map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template +.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +May 10 17:36:39.164: INFO: Daemon set daemon-set has an updated status +STEP: patching the DaemonSet Status +STEP: watching for the daemon set status to be patched +May 10 17:36:39.180: INFO: Observed event: ADDED +May 10 17:36:39.180: INFO: Observed event: MODIFIED +May 10 17:36:39.181: INFO: Observed event: MODIFIED +May 10 17:36:39.181: INFO: Observed event: MODIFIED +May 10 17:36:39.181: INFO: Observed daemon set daemon-set in namespace daemonsets-2986 with annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +May 10 17:36:39.181: INFO: Found daemon set daemon-set in namespace daemonsets-2986 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusPatched True 0001-01-01 00:00:00 +0000 UTC }] +May 10 17:36:39.181: INFO: Daemon set daemon-set has a patched status +``` + +Verification that the test passed! + +``` +Ran 1 of 5745 Specs in 18.473 seconds +SUCCESS! -- 1 Passed | 0 Failed | 0 Pending | 5744 Skipped +--- PASS: TestE2E (18.62s) +``` + +Using APISnoop with the audit logger we can also confirm that the endpoints were hit during the test. 
+ +```sql +select distinct endpoint, right(useragent,75) AS useragent +from testing.audit_event +where endpoint ilike '%DaemonSetStatus%' +and release_date::BIGINT > round(((EXTRACT(EPOCH FROM NOW()))::numeric)*1000,0) - 60000 +and useragent like 'e2e%' +order by endpoint; +``` + +``` + endpoint | useragent +-----------------------------------------+------------------------------------------------------------------- + patchAppsV1NamespacedReplicaSetStatus | [sig-apps] ReplicaSet should validate Replicaset Status endpoints + readAppsV1NamespacedReplicaSetStatus | [sig-apps] ReplicaSet should validate Replicaset Status endpoints + replaceAppsV1NamespacedReplicaSetStatus | [sig-apps] ReplicaSet should validate Replicaset Status endpoints +(3 rows) +``` + +Even though the test has passed here, once merged it will join other jobs on [TestGrid](https://testgrid.k8s.io/) to determine if the test is stable and after two weeks it can be [promoted to conformance](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md#promoting-tests-to-conformance). + + +# Final Thoughts + +The current workflow and tooling provides a high level of confidence when working through each e2e test. Following agreed coding patterns, styles and processes helps to minimise possible issues and test flakes. There’s always opportunities to get support through GitHub tickets, [various Kubernetes slack channels](https://kubernetes.slack.com/messages/k8s-conformance) and conformance meetings. + +Every e2e test that’s merged and then promoted to conformance requires the input from a wide range of people. It is thanks to the support from community reviewers, SIGs and the direction provided by SIG-Architecture this work is not just possible but rewarding. 
diff --git a/blog/content/blog/deplying-talos-to-equinix.md b/blog/content/blog/deplying-talos-to-equinix.md new file mode 100644 index 0000000..7826c45 --- /dev/null +++ b/blog/content/blog/deplying-talos-to-equinix.md @@ -0,0 +1,762 @@ ++++ +title = "Deploying Talos to Equinix" +author = ["Caleb Woodbine", "Andrew Rynhard"] +date = 2021-02-03 +lastmod = 2021-03-10T15:48:17+13:00 +tags = ["kubernetes", "equinix", "talos", "org"] +categories = ["guides"] +draft = false +summary = "From nodes to workloads on baremetal" ++++ + +## Introduction {#introduction} + +In this guide we will launch a highly-available three Node Kubernetes cluster on Equinix Metal using Talos as the Node OS, as well as bootstrap, and controlPlane provider for Cluster-API. + +What is [Cluster-API](https://cluster-api.sigs.k8s.io/)? +: + +> Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters. + +What is [Talos](https://www.talos.dev/)? +: + +> Talos is a modern OS designed to be secure, immutable, and minimal. + +What is [Equinix Metal](https://metal.equinix.com/)? +: + +> A globally-available bare metal “as-a-service” that can be deployed and interconnected in minutes. + +The folks over at Equinix Metal have a wonderful heart for supporting Open Source communities. + +Why is this important? +: In general: Orchestrating a container based OS such as Talos ([Flatcar](http://flatcar-linux.org/), [Fedora CoreOS](https://getfedora.org/coreos/), or [RancherOS](https://rancher.com/products/rancher/)) shifts focus from the Nodes to the workloads. In terms of Talos: Currently the documentation for running an OS such as Talos in Equinix Metal for Kubernetes with Cluster-API is not so well documented and therefore inaccessible. It's important to fill in the gaps of knowledge. 
+ + +## Dependencies {#dependencies} + +What you'll need for this guide: + +- [talosctl](https://github.com/talos-systems/talos/releases/tag/v0.8.1) + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +- [packet-cli](https://github.com/packethost/packet-cli) + +- the ID and API token of an existing Equinix Metal project + +- an existing Kubernetes cluster with a public IP (such as [kind](http://kind.sigs.k8s.io/), [minikube](https://minikube.sigs.k8s.io/), or a cluster already on Equinix Metal) + + +## Preliminary steps {#preliminary-steps} + +In order to talk to Equinix Metal, we'll export environment variables to configure resources and talk via `packet-cli`. + +Set the correct project to create and manage resources in: + +```tmate + read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID +``` + +The API key for your account or project: + +```tmate + read -p 'PACKET_API_KEY: ' PACKET_API_KEY +``` + +Export the variables to be accessible from `packet-cli` and `clusterctl` later on: + +```tmate + export PACKET_PROJECT_ID PACKET_API_KEY PACKET_TOKEN=$PACKET_API_KEY +``` + +In the existing cluster, a public LoadBalancer IP will be needed. I have already installed nginx-ingress in this cluster, which has got a Service with the cluster's elastic IP. +We'll need this IP address later for use in booting the servers. +If you have set up your existing cluster differently, it'll just need to be an IP that we can use. + +```tmate + export LOAD_BALANCER_IP="$(kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')" +``` + + +## Setting up Cluster-API {#setting-up-cluster-api} + +Install Talos providers for Cluster-API bootstrap and controlplane in your existing cluster: + +```tmate + clusterctl init -b talos -c talos -i packet +``` + +This will install Talos's bootstrap and controlPlane controllers as well as the Packet / Equinix Metal infrastructure provider. 
+ +****Important**** note: + +- the `bootstrap-talos` controller in the `cabpt-system` namespace must be running a version greater than `v0.2.0-alpha.8`. The version can be displayed in with `clusterctl upgrade plan` when it's installed. + + +## Setting up Matchbox {#setting-up-matchbox} + +Currently, since Equinix Metal have ****not**** yet added support for Talos, it is necessary to install [Matchbox](https://matchbox.psdn.io/) to boot the servers (There is an [issue](https://github.com/packethost/packet-images/issues/26) in progress and [feedback](https://feedback.equinixmetal.com/operating-systems/p/talos-as-officially-supported-operating-system) for adding support). + +What is Matchbox? +: + +> Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. + +Here is the manifest for a basic matchbox installation: + +```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: matchbox + spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + name: matchbox + template: + metadata: + labels: + name: matchbox + spec: + containers: + - name: matchbox + image: quay.io/poseidon/matchbox:v0.9.0 + env: + - name: MATCHBOX_ADDRESS + value: "0.0.0.0:8080" + - name: MATCHBOX_LOG_LEVEL + value: "debug" + ports: + - name: http + containerPort: 8080 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: 8080 + resources: + requests: + cpu: 30m + memory: 20Mi + limits: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: data + mountPath: /var/lib/matchbox + - name: assets + mountPath: /var/lib/matchbox/assets + volumes: + - name: data + hostPath: + path: /var/local/matchbox/data + - name: assets + hostPath: + path: /var/local/matchbox/assets + --- + apiVersion: v1 + kind: Service + metadata: + name: matchbox + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + spec: + type: LoadBalancer + selector: + name: matchbox + ports: + - name: http + protocol: 
TCP + port: 8080 + targetPort: 8080 +``` + +Save it as `matchbox.yaml` + +The manifests above were inspired by the manifests in the [matchbox repo](https://github.com/poseidon/matchbox/tree/master/contrib/k8s). +For production it might be wise to use: + +- an Ingress with full TLS +- a ReadWriteMany storage provider instead of hostPath for scaling + +With the manifests ready to go, we'll install Matchbox into the `matchbox` namespace on the existing cluster with the following commands: + +```tmate + kubectl create ns matchbox + kubectl -n matchbox apply -f ./matchbox.yaml +``` + +You may need to patch the `Service.spec.externalIPs` to have an IP to access it from if one is not populated: + +```tmate + kubectl -n matchbox patch \ + service matchbox \ + -p "{\"spec\":{\"externalIPs\":[\"$LOAD_BALANCER_IP\"]}}" +``` + +Once the pod is live, we'll need to create a directory structure for storing Talos boot assets: + +```tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + mkdir -p /var/lib/matchbox/{profiles,groups} /var/lib/matchbox/assets/talos +``` + +Inside the Matchbox container, we'll download the Talos boot assets for Talos version 0.8.1 into the assets folder: + +```tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + wget -P /var/lib/matchbox/assets/talos \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/initramfs-amd64.xz \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/vmlinuz-amd64 +``` + +Now that the assets have been downloaded, run a checksum against them to verify: + +```tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c "cd /var/lib/matchbox/assets/talos && \ + wget -O- https://github.com/talos-systems/talos/releases/download/v0.8.1/sha512sum.txt 2> /dev/null \ + | sed 's,_out/,,g' \ + | grep 'initramfs-amd64.xz\|vmlinuz-amd64' \ + | sha512sum -c -" +``` + +Since there's only one Pod in the Matchbox deployment, we'll export its name to copy files into it: + 
+```tmate + export MATCHBOX_POD_NAME=$(kubectl -n matchbox get pods -l name=matchbox -o=jsonpath='{.items[0].metadata.name}') +``` + +[Profiles in Matchbox](https://matchbox.psdn.io/matchbox/#profiles) are JSON configurations for how the servers should boot, where from, and their kernel args. Save this file as `profile-default-amd64.json` + +```json + { + "id": "default-amd64", + "name": "default-amd64", + "boot": { + "kernel": "/assets/talos/vmlinuz-amd64", + "initrd": [ + "/assets/talos/initramfs-amd64.xz" + ], + "args": [ + "initrd=initramfs-amd64.xz", + "init_on_alloc=1", + "init_on_free=1", + "slub_debug=P", + "pti=on", + "random.trust_cpu=on", + "console=tty0", + "console=ttyS1,115200n8", + "slab_nomerge", + "printk.devkmsg=on", + "talos.platform=packet", + "talos.config=none" + ] + } + } +``` + +[Groups in Matchbox](https://matchbox.psdn.io/matchbox/#groups) are a way of letting servers pick up profiles based on selectors. Save this file as `group-default-amd64.json` + +```json + { + "id": "default-amd64", + "name": "default-amd64", + "profile": "default-amd64", + "selector": { + "arch": "amd64" + } + } +``` + +We'll copy the profile and group into their respective folders: + +```tmate + kubectl -n matchbox \ + cp ./profile-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/profiles/default-amd64.json + kubectl -n matchbox \ + cp ./group-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/groups/default-amd64.json +``` + +List the files to validate that they were written correctly: + +```tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c 'ls -alh /var/lib/matchbox/*/' +``` + + +### Testing Matchbox {#testing-matchbox} + +Using `curl`, we can verify Matchbox's running state: + +```tmate + curl http://$LOAD_BALANCER_IP:8080 +``` + +To test matchbox, we'll create an invalid userdata configuration for Talos, saving as `userdata.txt`: + +```text +#!talos +``` + +Feel free to use a valid one. 
+ +Now let's talk to Equinix Metal to create a server pointing to the Matchbox server: + +```tmate + packet-cli device create \ + --hostname talos-pxe-boot-test-1 \ + --plan c1.small.x86 \ + --facility sjc1 \ + --operating-system custom_ipxe \ + --project-id "$PACKET_PROJECT_ID" \ + --ipxe-script-url "http://$LOAD_BALANCER_IP:8080/ipxe?arch=amd64" \ + --userdata-file=./userdata.txt +``` + +In the meanwhile, we can watch the logs to see how things are: + +```tmate + kubectl -n matchbox logs deployment/matchbox -f --tail=100 +``` + +Looking at the logs, there should be some get requests of resources that will be used to boot the OS. + +Notes: + +- fun fact: you can run Matchbox on Android using [Termux](https://f-droid.org/en/packages/com.termux/). + + +## The cluster {#the-cluster} + + +### Preparing the cluster {#preparing-the-cluster} + +Here we will declare the template that we will shortly generate our usable cluster from: + +```yaml + kind: TalosControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + metadata: + name: "${CLUSTER_NAME}-control-plane" + spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - https://github.com/packethost/packet-ccm/releases/download/v1.1.0/deployment.yaml + - op: add + path: 
/cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + metadata: + name: "${CLUSTER_NAME}-control-plane" + spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] + --- + apiVersion: cluster.x-k8s.io/v1alpha3 + kind: Cluster + metadata: + name: "${CLUSTER_NAME}" + spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: "${CLUSTER_NAME}-control-plane" + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + metadata: + name: "${CLUSTER_NAME}" + spec: + projectID: "${PACKET_PROJECT_ID}" + facility: "${FACILITY}" + --- + apiVersion: cluster.x-k8s.io/v1alpha3 + kind: MachineDeployment + metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + 
pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + metadata: + name: ${CLUSTER_NAME}-worker-a + spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] + --- + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + spec: + template: + spec: + generateType: init +``` + +Inside of `TalosControlPlane.spec.controlPlaneConfig.init`, I'm very much liking the use of `generateType: init` paired with `configPatches`. 
This enables: + +- configuration to be generated; +- management of certificates out of the cluster operator's hands; +- another level of standardisation; and +- overrides to be added where needed + +Notes: + +- the ClusterAPI template above uses Packet-Cloud-Controller manager version 1.1.0 + + +#### Templating your configuration {#templating-your-configuration} + +Set environment variables for configuration: + + +```bash + export CLUSTER_NAME="talos-metal" + export FACILITY=sjc1 + export KUBERNETES_VERSION=v1.20.2 + export POD_CIDR=10.244.0.0/16 + export SERVICE_CIDR=10.96.0.0/12 + export CONTROLPLANE_NODE_TYPE=c1.small.x86 + export CONTROL_PLANE_MACHINE_COUNT=3 + export WORKER_NODE_TYPE=c1.small.x86 + export WORKER_MACHINE_COUNT=0 + export SSH_KEY="" + export IPXE_SERVER_IP=$LOAD_BALANCER_IP +``` + +In the variables above, we will create a cluster which has three small controlPlane nodes to run workloads. + + +#### Render the manifests {#render-the-manifests} + +Render your cluster configuration from the template: + +```tmate + clusterctl config cluster "$CLUSTER_NAME" \ + --from ./talos-packet-cluster-template.yaml \ + -n "$CLUSTER_NAME" > "$CLUSTER_NAME"-cluster-capi.yaml +``` + + +### Creating the cluster {#creating-the-cluster} + +With the template for the cluster rendered to how we wish to deploy it, it's now time to apply it: + +```tmate + kubectl create ns "$CLUSTER_NAME" + kubectl -n "$CLUSTER_NAME" apply -f ./"$CLUSTER_NAME"-cluster-capi.yaml +``` + +The cluster will now be brought up; we can see the progress by taking a look at the resources: + +```tmate + kubectl -n "$CLUSTER_NAME" get machines,clusters,packetmachines,packetclusters +``` + +Note: As expected, the cluster may take some time to appear and be accessible. + +Not long after applying, a KubeConfig is available. 
Fetch the KubeConfig from the existing cluster with: + +```tmate + kubectl -n "$CLUSTER_NAME" get secrets \ + "$CLUSTER_NAME"-kubeconfig -o=jsonpath='{.data.value}' \ + | base64 -d > $HOME/.kube/"$CLUSTER_NAME" +``` + +Using the KubeConfig from the new cluster, check out the status of it: + +```tmate + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" cluster-info +``` + +Once the APIServer is reachable, create configuration for how the Packet-Cloud-Controller-Manager should talk to Equinix-Metal: + +```tmate + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" -n kube-system \ + create secret generic packet-cloud-config \ + --from-literal=cloud-sa.json="{\"apiKey\": \"${PACKET_API_KEY}\",\"projectID\": \"${PACKET_PROJECT_ID}\"}" +``` + +Since we're able to talk to the APIServer, we can check how all Pods are doing: + + +```bash + export CLUSTER_NAME="talos-metal" + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME"\ + -n kube-system get pods +``` + +Listing Pods shows that everything is live and in a good state: + +```bash +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-5b55f9f688-fb2cb 1/1 Running 0 25m +kube-system coredns-5b55f9f688-qsvg5 1/1 Running 0 25m +kube-system kube-apiserver-665px 1/1 Running 0 19m +kube-system kube-apiserver-mz68q 1/1 Running 0 19m +kube-system kube-apiserver-qfklt 1/1 Running 2 19m +kube-system kube-controller-manager-6grxd 1/1 Running 0 19m +kube-system kube-controller-manager-cf76h 1/1 Running 0 19m +kube-system kube-controller-manager-dsmgf 1/1 Running 0 19m +kube-system kube-flannel-brdxw 1/1 Running 0 24m +kube-system kube-flannel-dm85d 1/1 Running 0 24m +kube-system kube-flannel-sg6k9 1/1 Running 0 24m +kube-system kube-proxy-flx59 1/1 Running 0 24m +kube-system kube-proxy-gbn4l 1/1 Running 0 24m +kube-system kube-proxy-ns84v 1/1 Running 0 24m +kube-system kube-scheduler-4qhjw 1/1 Running 0 19m +kube-system kube-scheduler-kbm5z 1/1 Running 0 19m +kube-system kube-scheduler-klsmp 1/1 Running 0 19m +kube-system 
packet-cloud-controller-manager-77cd8c9c7c-cdzfv 1/1 Running 0 20m +kube-system pod-checkpointer-4szh6 1/1 Running 0 19m +kube-system pod-checkpointer-4szh6-talos-metal-control-plane-j29lb 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h-talos-metal-control-plane-lk9f2 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh-talos-metal-control-plane-h9v4j 1/1 Running 0 19m +``` + +With the cluster live, it's now ready for workloads to be deployed! + + +## Talos Configuration {#talos-configuration} + +In order to manage Talos Nodes outside of Kubernetes, we need to create and set up configuration to use. + +Create the directory for the config: + +```tmate + mkdir -p $HOME/.talos +``` + +Discover the IP for the first controlPlane: + +```tmate + export TALOS_ENDPOINT=$(kubectl -n "$CLUSTER_NAME" \ + get machines \ + $(kubectl -n "$CLUSTER_NAME" \ + get machines -l cluster.x-k8s.io/control-plane='' \ + --no-headers --output=jsonpath='{.items[0].metadata.name}') \ + -o=jsonpath="{.status.addresses[?(@.type=='ExternalIP')].address}" | awk '{print $2}') +``` + +Fetch the `talosconfig` from the existing cluster: + +```tmate + kubectl get talosconfig \ + -n $CLUSTER_NAME \ + -l cluster.x-k8s.io/cluster-name=$CLUSTER_NAME \ + -o yaml -o jsonpath='{.items[0].status.talosConfig}' > $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml +``` + +Write in the configuration the endpoint IP and node IP: + +```tmate + talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config endpoint $TALOS_ENDPOINT + talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config node $TALOS_ENDPOINT +``` + +Now that the `talosconfig` has been written, try listing all containers: + + +```bash + export CLUSTER_NAME="talos-metal" + # removing ip; omit ` | sed ...` for regular use + talosctl --talosconfig 
$HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml containers | sed -r 's/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b'/"x.x.x.x "/ +``` + +Here are the containers running on this particular node, in containerd (not k8s related): + +```bash +NODE NAMESPACE ID IMAGE PID STATUS +x.x.x.x system apid talos/apid 3046 RUNNING +x.x.x.x system etcd gcr.io/etcd-development/etcd:v3.4.14 3130 RUNNING +x.x.x.x system networkd talos/networkd 2879 RUNNING +x.x.x.x system routerd talos/routerd 2888 RUNNING +x.x.x.x system timed talos/timed 2976 RUNNING +x.x.x.x system trustd talos/trustd 3047 RUNNING +``` + + +## Clean up {#clean-up} + +Tearing down the entire cluster and the resources associated with it can be achieved by: + +1. Deleting the cluster: + + + +```tmate + kubectl -n "$CLUSTER_NAME" delete cluster "$CLUSTER_NAME" +``` + +2. Deleting the namespace: + +```tmate + kubectl delete ns "$CLUSTER_NAME" +``` + +3. Removing local configurations: + +```tmate + rm \ + $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + $HOME/.kube/"$CLUSTER_NAME" +``` + + +## What have I learned from this? {#what-have-i-learned-from-this} + +(always learning) how wonderful the Kubernetes community is +: there are so many knowledgable individuals who are so ready for collaboration and adoption - it doesn't matter the SIG or group. + +how modular Cluster-API is +: Cluster-API components (bootstrap, controlPlane, core, infrastructure) can be swapped out and meshed together in very cool ways. + + +## Credits {#credits} + +Integrating Talos into this project would not be possible without help from [Andrew Rynhard (Talos Systems)](https://github.com/andrewrynhard), huge thanks to him for reaching out for pairing and co-authoring. 
+ + +## Notes and references {#notes-and-references} + +- with the new cluster's controlPlane live and available for deployment, the iPXE server could be moved into that cluster - meaning that new servers boot from the cluster that they'll join, making it almost self-contained +- cluster configuration as based off of [cluster-template.yaml from the cluster-api-provider-packet repo](https://github.com/kubernetes-sigs/cluster-api-provider-packet/blob/479faf06e1337b1e979cb624ca8be015b2a89cde/templates/cluster-template.yaml) +- this post has been made to [blog.calebwoodbine.com](https://blog.calebwoodbine.com/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal), and [talos-system.com/blog](https://ii.coop/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal/), but is also available as an [Org file](https://github.com/ii/org/blob/master/ii/equinix-metal-capi-talos-kubernetes/README.org) + +--- + +Hope you've enjoyed the output of this project! +Thank you! diff --git a/blog/content/blog/digital-ocean-apiv2--in-chef-provisioning.md b/blog/content/blog/digital-ocean-apiv2--in-chef-provisioning.md new file mode 100644 index 0000000..69d66b3 --- /dev/null +++ b/blog/content/blog/digital-ocean-apiv2--in-chef-provisioning.md @@ -0,0 +1,46 @@ ++++ +title = "Digital Ocean APIv2 in chef-provisioning" +date = 2015-10-30 +author = ["Hippie Hacker"] +lastmod = "Fri Oct 30 03:54:18 NZDT 2015" +summary = "Digital Ocean is sunsetting api v1 on November 9th. It's taken a while, but @geemus released support in fog v1.35.0" ++++ + + +Digital Ocean is [sunsetting api v1](https://developers.digitalocean.com/documentation/changelog/api-v1/sunsetting-api-v1/) on November 9th. 
It's taken a while, but @geemus [released support in fog v1.35.0](https://github.com/fog/fog/issues/3419#issuecomment-149700617) + +I was able to take that work and [update chef-provisioning-fog](https://github.com/chef/chef-provisioning-fog/issues/119#issuecomment-152188977) which we can now use to chef-provision on Digital Ocean after next week. + +The branch is at [ii/chef-provisioning-fog apiv2](https://github.com/ii/chef-provisioning-fog/tree/do_api_v2) + +Run ```chef-client -z my_do_server.rb``` on the following ruby: + +```ruby +with_driver 'fog:DigitalOcean', compute_options: { + digitalocean_token: ENV['DOTOKEN'] +# digitalocean_api_key: 'V1_DEPRECATED', +# digitalocean_client_id: 'V1_DEPRECATED', + } +# Adding compute_options here seems broken ^^^^ +# so make sure and add driver_options to knife.rb + +# put this in knife.rb +#knife[:digital_ocean_access_token] = ENV['DOTOKEN'] +#driver_options compute_options: {digitalocean_token: ENV['DOTOKEN']} + +with_machine_options bootstrap_options: { + image_distribution: 'Ubuntu', + image_name: '14.04 x64', + region_name: 'New York 3', + flavor_name: '2GB', + tags: { + 'ii' => 'lovesyou' + }, + } + +with_chef_server 'https://api.chef.io/organizations/ii', + :client_name => Chef::Config[:node_name], + :signing_key_filename => Chef::Config[:client_key] + +machine 'do.ii.delivery' +``` diff --git a/blog/content/blog/flashing-hardware-with-software-from-the-web.md b/blog/content/blog/flashing-hardware-with-software-from-the-web.md new file mode 100644 index 0000000..7578fb9 --- /dev/null +++ b/blog/content/blog/flashing-hardware-with-software-from-the-web.md @@ -0,0 +1,74 @@ ++++ +title = "Flashing hardware with software from the web" +author = ["Hippie Hacker"] +date = 2017-01-04 +lastmod = 2017-01-04T15:00:02+13:00 +tags = ["kubernetes", "cloud"] +categories = ["guides"] +draft = false +summary = "It's possible to flash hardware from the web" ++++ + + +## It's possible to flash hardware from the web + +Getting custom 
software onto hardware is hard. Particularly if you want people to be able to modify, change, and share that software. It can be as easy as visiting a web page, connecting our devices, and loading our customized versions. + +[flash.getchip.com](http://flash.getchip.com/) +uses the [C.H.I.P. Flasher +chrome-plugin](https://chrome.google.com/webstore/detail/chip-flasher/bpohdfcdfghdcgflomadkijfdgalcgoi) + +Most AllWinner [boot roms](http://linux-sunxi.org/BROM) detect when a uboot pin +is connected to ground. This forces the devices into [FEL +mode](http://linux-sunxi.org/FEL) to support flashing. On the +[C.H.I.P.](https://getchip.com/pages/chip), a physical paperclip can be used to +short the uboot/FEL pin. + +## A paperclip or jumper wire is used to turn on write-mode + +![jumper wire in correct place in chip](/images/2017/01/uboot_fel_jumper.jpg) + +This allows the website + plugin to detect the C.H.I.P. and present valid images for flashing. + +![image](/images/2017/01/Flasher-CHIP-Detected.png) + +![image](/images/2017/01/Flasher-CHIP-details.png) + +The images can be saved to disk and the plugin can be used offline to flash again. + +> Flashing will download between 250MB-625MB of data. If you have a slow internet connection, it can take a very long time to finish. If your connection is slow, we recommend that you click on the "cloud" icon in the upper-right corner of the image you want, and download the file first. You can then use the "Choose a file..." option once it's downloaded. 
+ +## Currently only a few images are available + +![image](/images/2017/01/Flasher-CHIP-image-selection-1.png) + +**However this chrome plugin could be used with any custom OS image build pipeline to deploy directly to hardware from the web!** + +We took [resinOS](https://github.com/resin-os) and [ported it to the +C.H.I.P.](https://gitlab.ii.org.nz/iichip/resin-chip/blob/fastbuild/chip.json) +and think it would be useful to flash any AllWinner/FEL device directly from +gitlab or other ci automation pipeline web interface. + +![image](/images/2017/01/resin-chip-build-1.png) + +What if we could replace the _Download_ link with _Flash_ directly? + +![image](/images/2017/01/resinos-chip-download.png) + +## What's needed? + +* NextThingCo recently updated their + [BuildRoot](https://github.com/NextThingCo/CHIP-buildroot) to [use a new + image + format](https://bbs.nextthing.co/t/new-chip-sdk-chip-tools-update/12980) + which we still need to update our [resin port to + C.H.I.P.](https://gitlab.ii.org.nz/iichip/resin-chip/issues/1) + to use. +* NextThingCo also [doesn't seem to + publish](https://github.com/NextThingCo/CHIP-SDK/issues/18) the plugin + source, but [we've pulled it out of the plugin to + analyze](https://gitlab.ii.org.nz/iichip/CHIP-flasher-chromeplugin). + We have a couple open tickets: + * [Locate NextThingCo src for CHIP-flasher chrome plugin](https://gitlab.ii.org.nz/iichip/iichip/issues/1) + * [Build, install, and use the upstream sunxi-fel-chrome-plugin to install firmware to an iiCHIP](https://gitlab.ii.org.nz/iichip/sunxi-fel-chrome-extension/issues/1) + * [Deliver forked CHIP images via a webpage](https://gitlab.ii.org.nz/iichip/iichip/issues/2). 
diff --git a/blog/content/blog/funding-community-initiatives.md b/blog/content/blog/funding-community-initiatives.md new file mode 100644 index 0000000..b933fa4 --- /dev/null +++ b/blog/content/blog/funding-community-initiatives.md @@ -0,0 +1,36 @@ ++++ +title = "funding community initiatives" +author = ["Hippie Hacker"] +date = 2016-09-14 +lastmod = "Wed Sep 14 04:29:06 NZST 2016" +summary = "If you want to cobudget with us, write your email and #chilli4change on a physical envelope with your contribution and drop it by in person..." ++++ + + +_If you want to [@cobudget](https://twitter.com/cobudget) with us, write your email and [#chilli4change](https://twitter.com/hashtag/chilli4change) on a physical envelope with your contribution and drop it by in person at 148 Durham Street, Tauranga._ + +Your sharing of community wants, values, and feelings at [#chilli4change](https://twitter.com/hashtag/chilli4change) Friday now meets a _call to action!_ + +You wanted Thai or Mexican for our next meal. You also wanted to invite [Arturo Pelayo](http://www.arturopelayo.com/) from [Tactile Astronomy](https://twitter.com/tactileedu) + +We all put money into envelopes with our emails on them, so we could [@cobudget](https://twitter.com/cobudget) together to decide what to eat and how to get our next speaker here! + +You will now have received an email from iiCobudget: + +![image](/images/2016/09/2016-09-13-115634_646x278_escrotum.png) + +You can [see how it works](https://docs.google.com/presentation/d/1ZQYKxhHwKuQGmOMPpoE8Eo0XMuw1yn55Bjgsh6-D0eQ/pub?start=true&loop=true&delayms=5000&slide=id.ge08287da5_10_0), but we are going to go ahead and walk through clicking on getting started. + +![image](/images/2016/09/2016-09-13-120248_415x462_escrotum.png) + +You just need to fill in your name, and choose a password. 
+ +![image](/images/2016/09/2016-09-13-120912_650x537_escrotum.png) + +Now you have the opportunity to allocate the money from your envelope into existing idea buckets or create your own! + +![image](/images/2016/09/2016-09-13-121209_824x565_escrotum.png) + +Looking forward to seeing us host [Arturo](http://www.arturopelayo.com/) at the next [#chilli4change](https://twitter.com/hashtag/chilli4change)! + +[@hippiehacker](https://twitter.com/hippiehacker) diff --git a/blog/content/blog/grpc-learning.md b/blog/content/blog/grpc-learning.md new file mode 100644 index 0000000..e5c3253 --- /dev/null +++ b/blog/content/blog/grpc-learning.md @@ -0,0 +1,213 @@ ++++ +title = "Learning Update: Introduction to gRPC" +author = ["Zach Mandeville"] +date = 2021-03-09 +lastmod = 2021-03-10T15:19:29+13:00 +categories = ["learning"] +draft = false +summary = "An update on an ii member's journey on learning gRPC" ++++ + +## Prelude {#prelude} + +As I continue my career in code, I've come to find the most important part of my +practice is also the least visible: how I learn. There are beautiful moments +when I know exactly how to do something and just need to implement it so I'll +pour myself a cup of coffee, put [Firestarter](https://www.youtube.com/watch?v=wmin5WkOuPw) on repeat, and watch my beautiful code +unfurl down the screen as fast as I can type it. These moments, though, are not +typical. + +Most of the time, I am discovering a new problem I do not yet know how to solve, +within a domain or technology I have not yet experienced, and to solve the +problem i have to first understand it. Here my coding life is a bit quieter and +boring to watch: I'll pour myself a cup of tea, put Firestarter(lo-fi ambient +remix) on repeat, and start poring through reference docs and tutorials and +writing "TODO: FIGURE OUT WHAT {X} MEANS" in my expanding network of notes. + +This work is crucial for code, but often unseen, happening silently in the space +between git commits. 
And so, to celebrate this work and make it more visible, +we'll be posting periodic learning updates on this blog. These are written as +honest checkpoints taken mid-understanding, so while they are hopefully +illuminating, they should not be read as any sort of authoratative guide. + +Sweet as, let's set a checkpoint! Right now, I'm learning all about gRPC and +protocol buffers and am quite excited about everything I've found. + + +## gRPC: what's it mean? {#grpc-what-s-it-mean} + +gRPC stands for (google)Remote Procedure Call. It is an evolution of Remote +Procedure Calls, which is one of the primary models of api design (the other +being REST). So RPC involves specifying how clients and servers should +communicate with one another, but using a completely different paradigm than +REST. One of the most immediate distinctions, for me, is with REST you have +paths on the server that you make requests to, whereas with RPC it's more like +methods of a server interface that you can call. This is the "remote procedure" +aspect of the design, where on the client's side, the communication feels like +running functions directly on the server. + +The way gRPC operates, sort of the material of the design, is with protocol +buffers. And so to learn gRPC you want to have a good understanding of protocol +buffers (or protobuf) first. + + +## Protocol Buffers: What do they mean? {#protocol-buffers-what-do-they-mean} + +Protocol Buffers are another creation of Google, and are a way to define and +serialize data. They tackle the same problem as XML or JSON, but in a much +different way. + +Protocol buffers work by defining a fully typed contract for your API in a +.proto file, which is then used to generate source code and compile your data +into streamable bytes. So the data being passed along is binary instead of +text-based, but the specification of this data is extremely readable, and +can easily generate introspective tools and documentation. 
+ +Proto buffers also feel distinct in that they were designed with modern +technology and modern paradigms. So they work with HTTP/2 and work extremely +well for micro-services architectures utilizing streams of data. This HTTP/2 +requirement also means, though, that they cannot be consumed directly by a web +browser. + + +## Well-Known Advantages of gRPC and protobuf {#well-known-advantages-of-grpc-and-protobuf} + +Many of the advantages of gRPC are articulated well on the grpc.io homepage and +other blogs and resources. I do not want to reiterate the same points, and will +have links to resources I find useful at the bottom of this post. In short, +gRPC: + +- saves network bandwidth +- provides faster and more efficient communication +- can be used by any language +- offers client-streaming, server-streaming, and bidirectional streaming services +- allows for easy evolution and iteration of your api, while keeping backward compatibility. +- has an api contract that is easy to write and understand. + + +## My favourite things so far about gRPC {#my-favourite-things-so-far-about-grpc} + +Since I am just starting to explore gRPC, I cannot speak well to the system-wide +advantages of it and how I find it works in production. There are immediate +ergonomic and conceptual advantages to it though that I find quite exciting. + + +### Writing and Reading API's {#writing-and-reading-api-s} + +For one, the type definitions make writing your api, and understanding others, +quite simple. You can read a \`.proto\` file as if it were documentation (and +still generate documentation from it). 
For example, a service that takes a +subject and returns a poem would look like this: + +```text +syntax = 'proto3'; + +message Subject { + string name = 1; + string mood = 2; + repeated string keywords = 3; +} + +message Poem { + string title = 1; + string body = 2; + int32 edition = 3; +} + +message PoemGeneratorRequest { + Subject subject = 1; +} + +message PoemGeneratorResponse { + Poem poem = 1; +} + +service PoemService { + rpc PoemGenerator(PoemGeneratorRequest) returns (PoemGeneratorResponse) {}; +} +``` + +I found that, with no knowledge of the syntax of protocol buffers, I could +understand specs like this immediately. Much of the proto's syntax is +understanble through context clues. You define some messages that are made up of +fields with specific types, and then define a services for passing these +messages. With protobuf, you work from foundational types that then get +increasingly complex while maintaining consistent syntax. This is possible in a +REST API too through discipline and convention, but here that discipline is +baked into the structure itself. + +Also, evolving an API is relatively simple. If I wanted to introduce a new field +in my poem subjects, it would look like so: + +```text +message Subject { + string name = 1; + string mood = 2; + repeated string keywords = 3; + string season = 4; +} +``` + +Each field has a default value, which is used if no other value is provided. So +services set up for the older api would not pass along the `season` field, and +it'd be interpreted as an empty string. Similarly, if we send messages from the +new api to an old service, it will simply drop any field it doesn't understand. +Deprecating fields requires a bit more work, but is equally straightforward. So +while you will need to ensure your clients account for default values, gRPC +makes it simple to evolve your api without breaking changes. 
+ + +### Code generation and tool integration {#code-generation-and-tool-integration} + +One awesome part of protobuf and gRPC is its code generation. After you've +defined your API, you can use the program [protoc](https://github.com/protocolbuffers/protobuf) to generate code into several +languages. This means much of the logic for my server and client is taken care +of for me, and I could focus on the business logic. + +protoc outputs to several different languages, but the one I've been working +with is Go. Go also originated in Google, and you can feel the shared principles +and purpose through how well integrated these three services are. The biggest +productivity boost for me was the LSP integration. I would define a new service, +generate the go code, switch over to my server code and as I started to type the +service's name, my editor would immediately start showing me the methods +available to this service and their signatures. It is like having a quiet, eager +assistant handing you all your tools as you need them. It also meant that I was +immediately working on my code at this strategic higher-level. I was concerned +with the structure and flow of data as so much of the implementation code was +generated for me. + + +### Reflection and Introspection {#reflection-and-introspection} + +Lastly, a quality of gRPC that makes it real exciting to learn is in the ease of +its introspection. The typed nature of protobuf allows for easy, consistent +integration with a range of tools beyond your own services. I saw that +immediately with the LSP integration and emacs, but was truly chuffed when I +discovered the [Evans CLI](https://github.com/ktr0731/evans). If you have reflection enabled on your server, which +is straightforward to do, then you can immediately start communicating with it +using Evans. Evans reminded me a bit of the postgres client \`psql\`, which is one +of my favourite tools. 
With both, use a simple set of commands to investigate +and richly describe the service you're building in a repl environment. It turns +the development of your services into this dynamic, tangible experience that +rewards curiosity. + +I know I have a lot to learn about gRPC, but I am immediately pleased, and +grateful, that the framework has so many features that makes the learning +experience rewarding and fun. + + +## Resources {#resources} + +I've found the following online resources useful for getting into the why's and +how's of gRPC and protobuf: + +- [grpc.io's official docs](https://grpc.io/docs/what-is-grpc/introduction/) are quite good and a great introduction. +- [The Developer Docs for Protocol Buffers](https://developers.google.com/protocol-buffers/docs/overview) is similarly good. +- [Alan Shreve's Talk on gRPC](https://www.youtube.com/watch?v=RoXT%5FRkg8LA) is fun and engaging, and he offers a good + high-level look at the framework, its historical context, and its benefits. +- [Stephan Maarek's gRPC class on Udemy](https://www.udemy.com/course/grpc-golang/) is in-depth, patient, and hands-on. It is + a good balance of theory and implementation, with enough footholds for you to + go on and learn more. +- [Lyft's Envoy: from Monolith to Service Mesh](https://www.youtube.com/watch?v=RVZX4CwKhGE&t=2915s) is a talk by Matt Klein about the + Envoy proxy, which is a gRPC api. It's a good talk, that shows the exciting + and complex things you can design with this framework. 
diff --git a/blog/content/blog/helping-blind-youth-to-see-the-stars.md b/blog/content/blog/helping-blind-youth-to-see-the-stars.md new file mode 100644 index 0000000..6e96a9b --- /dev/null +++ b/blog/content/blog/helping-blind-youth-to-see-the-stars.md @@ -0,0 +1,32 @@ ++++ +title = "helping blind youth to see the stars" +date = 2016-09-05 +author = ["Hippie Hacker"] +lastmod = "Mon Sep 05 08:18:28 NZST 2016" +summary = "We got together with people who create and code at the osos.nz to innovate together on opensource projects" ++++ + + +## Funding 3D printed astronomy + +We got together with people who create and code at the [osos.nz](http://osos.nz) [hackathon](http://hackathon.opensourceopensociety.com/) to innovate together on opensource projects. + +[Arturo Pelayo](http://www.arturopelayo.com/) uses 3D Printed Astronomy to allow the **blind to experience the stars** at [Tactile Astronomy](http://twitter.com/tactileedu). + +* Tactile Astronomy needed hardware for augmenting their 3D Prints /_which was funded_/ +* They also need software to coordinate human interactions with hardware /_needs funding_/ + +[Gardner Bickford](https://github.com/gardner) is making it easy for people to **vote with your money** at [Cobudget](https://twitter.com/cobudget) + +* [OS//OS](http://osos.nz) gave everyone at the conference $5 to spend +* they invited us to create [bucket to fund hardware](http://cobudget.co/#/buckets/662) +* the hardware was [full funded](https://twitter.com/hippiehacker/status/771418829364731904) by the close of the conference + +[ii](ii.delivery) wants to **give them both a platform for shared innovation**: + +* to collaboratively edit shared code on a website +* and have that code running on real hardware instantly + +We look forward to working closely with Arturo and Gardner in the coming weeks to help them innovate openly together. 
+ +[@hippiehacker](http://twitter.com/hippiehacker) diff --git a/blog/content/blog/ii-wants-chilli4change-@basestationnz.md b/blog/content/blog/ii-wants-chilli4change-@basestationnz.md new file mode 100644 index 0000000..fddc458 --- /dev/null +++ b/blog/content/blog/ii-wants-chilli4change-@basestationnz.md @@ -0,0 +1,42 @@ ++++ +title = "ii wants #chilli4change @basestationnz" +author = ["Hippie Hacker"] +date = 2016-09-09 +lastmod = "Fri Sep 09 05:24:02 NZST 2016" +summary = "We hope this initial #chilli4change provides a forum for a heated discussion of innovation around an (optionally) spicy meal brought to you by ii" ++++ + + +![display of chili-based food](/images/2016/09/chilli4change-food.png) +We hope this initial [#chilli4change](https://twitter.com/hippiehacker/status/773276669020778497) provides a forum for a _heated_ discussion of innovation around an (optionally) spicy meal brought to you by [ii](http://blog.ii.delivery). + +ii wants to help [Tactile Astronomy](https://twitter.com/tactileedu/status/772705366891048960) provide a tactile experience of the heavens to blind youth + +![logo for tactile astronomy](/images/2016/09/TactileAstronomy_640x290.jpg) + +ii wants to help [CoBudget](https://docs.google.com/presentation/d/1ZQYKxhHwKuQGmOMPpoE8Eo0XMuw1yn55Bjgsh6-D0eQ/present?slide=id.p) provide collaborative funding for crowds with a purpose + +![screenshot of cobudget success message](/images/2016/09/bucket-was-funded.png) + +ii invites you to participate in a conversation about innovation over [#chilli4change](https://twitter.com/hashtag/chilli4change) as we collaboratively invite one of these organizations to speak in Tauranga + +Our resident [Hippie Hacker](http://twitter.com/hippiehacker) at [basestation.nz](basestation.nz) will be demoing some collaboration technology that enables continuous innovation. + +![Hippie with his mind blown](/images/2016/09/mindblown.jpg) + +We look forward to a *heated discussion* over a meal of Texas Vegan Chilli. 
+ +``` +Noon Friday 9th of September +148 Durham Street, Tauranga, NZ +``` +We hope to do this again. To collaboratively fund the next event, +there will be koha buckets for chilli based food dishes from other cultures. (in addition to help host the next speaker) + + + +[@hippiehacker](http://twitter.com/hippiehacker) + + + + diff --git a/blog/content/blog/new-contributor-summit-session-01.md b/blog/content/blog/new-contributor-summit-session-01.md new file mode 100644 index 0000000..d8bed79 --- /dev/null +++ b/blog/content/blog/new-contributor-summit-session-01.md @@ -0,0 +1,304 @@ ++++ +title = "New Contributor Summit Session 01" +author = ["Zach Mandeville"] +date = 2021-02-05 +lastmod = 2021-02-05T15:56:17+13:00 +section = "post" +tags = ["kubernetes", "ncw", "testing"] +categories = ["guides"] +draft = true +summary = "Intro to Testing for new K8s contributors" +[menu.main] + identifier = "new-contributor-summit-session-01" ++++ + +## Introduction {#introduction} + + +## Agenda {#agenda} + +In this session we will tackle + +- your hardware and OS requirements +- installing all prerequisites +- github and git configuration +- forking and cloning kubernetes +- the kubernetes git workflow + + +## Hardware and OS Requirements {#hardware-and-os-requirements} + +Can run on linux, mac, and windows. 
+ + +### Hardware Requirements {#hardware-requirements} + +kubernetes is a large project, will require a lot of computing power + +- 8GB of RAM +- at least 50GB of free disk space +- multiple cores + + +#### If running kubernetes in docker {#if-running-kubernetes-in-docker} + +If using Docker for Mac (or Windows), dedicate the Docker system multiple CPU cores and 6GB RAM + + +### Linux {#linux} + +No additional considerations needed + + +### Mac {#mac} + + + +- will need to install +- Will need to install some command line tools + + + +```shell +brew install coreutils ed findutils gawk gnu-sed gnu-tar grep make +``` + +- set up some bashrc + + + +```nil +GNUBINS="$(find /usr/local/opt -type d -follow -name gnubin -print)" + +for bindir in ${GNUBINS[@]} +do + export PATH=$bindir:$PATH +done + +export PATH +``` + + +### Windows {#windows} + +additional steps, listed here + + +- if using windows 10, then setup the linux subsystem +- if on < windows 10, switch to a virtual machine running linux + + +## Software Prerequisites {#software-prerequisites} + + +### Docker {#docker} + + +#### What is docker {#what-is-docker} + +Docker is a set of platform as a service (PaaS) products that use OS-level virtualization to deliver software in packages called containers. +Containers are isolated from one another and bundle their own software, libraries and configuration files; they can communicate with each other through well-defined channels. +All containers are run by a single operating system kernel and therefore use fewer resources than virtual machines. + + +#### Check if you have docker installed {#check-if-you-have-docker-installed} + +The operating-system independent way to check whether Docker is running is to ask Docker, using the docker info command. 
+You can also use operating system utilities, such as + +```nil +shell sudo systemctl is-active docker +``` + +or + +```nil +sudo status docker +``` + +or + +```nil +sudo service docker status +``` + +or checking the service status using Windows utilities. +Finally, you can check in the process list for the \`dockerd\` process, using commands like + +```nil + ps +``` + +or + +```nil + top +``` + + +#### Installing docker {#installing-docker} + +\*Docker Engine is available on a variety of Linux platforms, macOS and Windows 10 through Docker Desktop, and as a static binary installation. +Find your preferred operating system below. + + + +- MacOS + + Instruction for MacOS [install](https://docs.docker.com/docker-for-mac/install/) + + + +- Linux + + Instuctions for + Debain [install](https://docs.docker.com/engine/install/debian/) + Fedora [install](https://docs.docker.com/engine/install/fedora/) + Ubuntu [install](https://docs.docker.com/engine/install/ubuntu/) + + + +- Windows + + Docker Desktop for Windows is the Community version of Docker for Microsoft Windows. + You can download Docker Desktop for Windows from Docker Hub to [install](https://docs.docker.com/docker-for-windows/install/) + + +### Git {#git} + + +#### What is git {#what-is-git} + +GitHub provides hosting for software development and version control using Git. +It offers the distributed version control and source code management (SCM) functionality of Git, plus its own features. +It provides access control and several collaboration features such as bug tracking, feature requests, task management and continuous integration. 
+ + +#### Check if you have git installed {#check-if-you-have-git-installed} + + +#### Installing git {#installing-git} + +In a terminal window run +\`git --version\` +If it is installed you will get a message like \`git version 2.25.1\` + + + +- Mac + + [Installing on macOS](https://github.com/git-guides/install-git#install-git-on-mac) + + + +- Linux + + [Installing on Linux](https://github.com/git-guides/install-git#install-git-on-linux) + + + +- Windows + + [Installing on Windows](https://github.com/git-guides/install-git#install-git-on-windows) + + +#### Configure git {#configure-git} + +To use git you need a Github account. +If you do not have an account yet go to the [Github](https://github.com/) website to sign up. +You'll need: + +- name +- email +- password + +preparing for working with the k8s repo. + + +### Go {#go} + + +#### What is go {#what-is-go} + +Go or [Golang](https://golang.org/) as it is also known is an open source programming language that makes it easy to build simple, reliable, and efficient software. + + +#### Installing go {#installing-go} + +We want to check if Go is installed and what version. +Open Command Prompt / CMD or Terminal window, execute the command to check the Go version. Make sure you have the latest version of Go. +$ go version + +If you need to install Go the [official installation page](https://golang.org/doc/install) has instructions for Linux, Mac and Windows + + +#### Adding go to your path {#adding-go-to-your-path} + +and knowing how to find your $GOPATH -- We can look here: + + +### SSH Keys {#ssh-keys} + + +#### what is ssh {#what-is-ssh} + +SSH is a secure protocol used as the primary means of connecting to Linux servers remotely. +It provides a text-based interface by spawning a remote shell. +After connecting, all commands you type in your local terminal are sent to the remote server and executed there. +SSH keys are a matching set of cryptographic keys which can be used for authentication. 
Each set contains a public and a private key. +The public key can be shared freely without concern, while the private key must be vigilantly guarded and never exposed to anyone. + + +#### creating a new ssh key {#creating-a-new-ssh-key} + +To generate an RSA key pair on your local computer, type: + +- ssh-keygen + +This will create two files in the .ssh directory: your private key id\_rsa and public key id\_rsa.pub + + +## Github configuration {#github-configuration} + + +### Signing up for github account {#signing-up-for-github-account} + + +### Uploading your SSH Key {#uploading-your-ssh-key} + + +### Signing the CNCF CLA {#signing-the-cncf-cla} + + +## Forking and Cloning K8s {#forking-and-cloning-k8s} + + +### brief tour of k8s repo {#brief-tour-of-k8s-repo} + + +### forking to your own repo {#forking-to-your-own-repo} + + +### cloning k8s down to your own computer {#cloning-k8s-down-to-your-own-computer} + + +## The Kubernetes git workflow {#the-kubernetes-git-workflow} + + +### k8s/k8s is 'upstream' {#k8s-k8s-is-upstream} + + +### you create a branch on your fork, and push and make changes. {#you-create-a-branch-on-your-fork-and-push-and-make-changes-dot} + + +### then open a pr in upstream, comparing across forks. {#then-open-a-pr-in-upstream-comparing-across-forks-dot} + + +## Getting Additional Help {#getting-additional-help} + +We won't be doing this live, but are there other resources we can offer for help? perhaps a slack channel that we'd be moderating during NCW times? A repo in which they can open issues for their questions? + + +## What's Next? {#what-s-next} + +Outline of session 2. You have all the requirements, now we will build and hack on kubernetes! 
diff --git a/blog/content/blog/new-contributor-summit-session-02.md b/blog/content/blog/new-contributor-summit-session-02.md new file mode 100644 index 0000000..9990107 --- /dev/null +++ b/blog/content/blog/new-contributor-summit-session-02.md @@ -0,0 +1,110 @@ ++++ +title = "New Contributor Summit Session 02" +author = ["Zach Mandeville"] +date = 2021-02-05 +lastmod = 2021-02-05T15:58:25+13:00 +tags = ["kubernetes", "ncw", "testing"] +categories = ["guides"] +draft = true +summary = "Part Two to our intro to testing for new K8s contributors" +[menu.main] + identifier = "new-contributor-summit-session-02" ++++ + +## Introduction {#introduction} + +- In this Session we will introduce you to the make command and kubernetes cmd folder. +- You'll also learn about KinD (kubernetes in docker) +- We'll learn more about how k8s buid process works + + By the end, you will edit and build a kubernetes command that you can run on your own kind cluster! + ****This session continues on Session 1. If you haven't done that one yet, do it first!**** + + +## Agenda {#agenda} + +- Intro to make +- Intro to CMD +- The Build Process +- Intro to KinD +- Editing and Building +- Running our command on KinD + + +## Make {#make} + + +### What it is {#what-it-is} + + +### Ensuring you have it on your computer {#ensuring-you-have-it-on-your-computer} + + +### How we use it {#how-we-use-it} + + +## The CMD Folder {#the-cmd-folder} + + +### Where to find it {#where-to-find-it} + + +### What it is {#what-it-is} + + +## Making in Parts {#making-in-parts} + +Why do we not make all of kubernetes (no don't run make release) +What do we make in isolation? 
+ + +## Verify Dev envrionment ready to go {#verify-dev-envrionment-ready-to-go} + +_if needed, include instructions for each type of OS_ + + +### Have Docker {#have-docker} + + +### Have git {#have-git} + + +### Have Go {#have-go} + + +#### GOPATH set {#gopath-set} + + +### Fork of k8s cloned to dev environment {#fork-of-k8s-cloned-to-dev-environment} + + +## Run a make command {#run-a-make-command} + + +### cd into k8s from yr terminal {#cd-into-k8s-from-yr-terminal} + + +### make WHAT=cmd/kubectl {#make-what-cmd-kubectl} + +maybe edit the print output for fun, and see it change + + +## Make a KinD Cluster {#make-a-kind-cluster} + + +### What is kind? {#what-is-kind} + + +### Install Kind {#install-kind} + + +### kind create cluster {#kind-create-cluster} + + +## Use newly-built kubectl binary in the KinD cluster {#use-newly-built-kubectl-binary-in-the-kind-cluster} + + +## Additional Help {#additional-help} + + +## What's Next? {#what-s-next} diff --git a/blog/content/blog/new-contributor-summit-session-03.md b/blog/content/blog/new-contributor-summit-session-03.md new file mode 100644 index 0000000..17fcd5c --- /dev/null +++ b/blog/content/blog/new-contributor-summit-session-03.md @@ -0,0 +1,117 @@ ++++ +title = "New Contributor Summit Session 03" +author = ["Zach Mandeville"] +date = 2021-02-05 +lastmod = 2021-02-05T16:00:03+13:00 +tags = ["kubernetes", "ncw", "testing"] +categories = ["guides"] +draft = true +summary = "Part Three to our intro to testing for new K8s contributors" +[menu.main] + identifier = "new-contributor-summit-session-03" ++++ + +## Introduction {#introduction} + +In this, we'll ramp up our abilities by adding testing into the mix! +Like session 2, we will edit, make, and run commands like kubectl, but now checking our builds with unit testing, using go test. +With these tests, we'll have increased confidence in contributing our work back upstream and so we'll also talk about pull requests, and the PR pre-submission practices. 
+ + +## Agenda {#agenda} + +- Setup our Dev environments +- introduce unit tests +- testing with go test and make +- PR's +- An intro to prow and test grid + + +## Setup {#setup} + + +### Kind {#kind} + + +### a working kubectl binary of some sort {#a-working-kubectl-binary-of-some-sort} + + +### go {#go} + + +### make {#make} + + +## Edit our kubectl binary {#edit-our-kubectl-binary} + +Adjust its message again, or have it do something in addition +don't build just yet + + +## Unit Tests {#unit-tests} + + +### what are they? {#what-are-they} + + +### why they important? {#why-they-important} + + +### how k8s uses them {#how-k8s-uses-them} + + +## Write a unit test for our kubectl binary {#write-a-unit-test-for-our-kubectl-binary} + + +## check our test with go test {#check-our-test-with-go-test} + + +## check our test with make {#check-our-test-with-make} + + +## Test scope {#test-scope} + + +### only run some tests {#only-run-some-tests} + + +### run all tests {#run-all-tests} + + +### time to run all tests {#time-to-run-all-tests} + + +## PR's {#pr-s} + +- review the pr flow again +- review the PR pre-submission guidelines +- review the style guidelines +- show some of the checks done on an existing pr and the checks for the pre-submission and style +- what is doing these checks? + + +## Prow {#prow} + + +### k8s git ops {#k8s-git-ops} + + +### helps manage these steps of the pr {#helps-manage-these-steps-of-the-pr} + + +### ensures yr pr follows the guidelines and passes all existing tests. {#ensures-yr-pr-follows-the-guidelines-and-passes-all-existing-tests-dot} + + +## Testgrid {#testgrid} + + +### show all the tests being run and their success {#show-all-the-tests-being-run-and-their-success} + + +### this can be optional, and so {#this-can-be-optional-and-so} + + +## Additional Help {#additional-help} + + +## What's Next? 
{#what-s-next} diff --git a/blog/content/blog/provisioning-ghosts-in-the-machine.md b/blog/content/blog/provisioning-ghosts-in-the-machine.md new file mode 100644 index 0000000..347dd11 --- /dev/null +++ b/blog/content/blog/provisioning-ghosts-in-the-machine.md @@ -0,0 +1,54 @@ ++++ +title = "Provisioning Ghosts in the Machine" +date = 2015-10-30 +lastmod = "Fri Oct 30 14:35:14 NZDT 2015" +draft = true +author = ["Hippie Hacker"] +summary = "I set this blog up in a few minutes using a chef-provisoining recipe, ghost-cookbook, and the new do-api-v2 support." ++++ + +I set this blog up in just a few minutes using https://github.com/cnunciato/ghost-cookbook +and a simple chef-provisioning recipe and the new [do-api-v2](http://blog.ii.delivery/do-api-v2/) support. + +I'd like to add support for syntax highlighting and comments at some point. + +You can run this with ```DOTOKEN=XXX chef-client -z THISRECIPE.rb``` + +```language-ruby +with_driver 'fog:DigitalOcean', compute_options: { + digitalocean_token: ENV['DOTOKEN'] + } + +with_machine_options convergence_options: { + chef_version: '12.4.3', + package_cache_path: '.chef/package_cache' + }, + bootstrap_options: { + image_distribution: 'Ubuntu', + image_name: '14.04 x64', + region_name: 'New York 3', + flavor_name: '2GB', + key_name: 'iido', + tags: { + 'ii' => 'lovesyou' + }, + } + +with_chef_server 'https://api.chef.io/organizations/ii', + :client_name => Chef::Config[:node_name], + :signing_key_filename => Chef::Config[:client_key] + +machine 'do.ii.delivery' do + action :destroy if ENV['DESTROY'] + recipe 'ghost::default' + recipe 'ghost::nginx' + # attribute %w[ ghost app mail transport ], 'SMTP' + # attribute %w[ ghost app mail options service ], 'Gmail' + # attribute %w[ ghost app mail options auth user ], 'smtp@hippiehacker.org' + # attribute %w[ ghost app mail options auth pass ], 'SOMETHING' + # attribute %w[ ghost remote name ], 'darepo' + # attribute %w[ ghost remote repo ], 
'git@github.com:ii/ghostcontent.git' + # attribute %w[ ghost remote revision ], 'master' +end + +``` diff --git a/blog/content/blog/pushing-vagrant-speed-with-squid-for-all-http-+-https-traffic.md b/blog/content/blog/pushing-vagrant-speed-with-squid-for-all-http-+-https-traffic.md new file mode 100644 index 0000000..967122c --- /dev/null +++ b/blog/content/blog/pushing-vagrant-speed-with-squid-for-all-http-+-https-traffic.md @@ -0,0 +1,33 @@ ++++ +title = "Pushing vagrant speed with squid for all http + https traffic" +date = 2015-12-19 +author = ["Hippie Hacker"] +lastmod = "Sat Dec 19 05:50:25 NZDT 2015" +summary = "docker run proxy exports an https certificate AND three ports for http / https proxying" ++++ + + +docker run proxy exports an https certificate AND three ports for http / https proxying + +3128 is as standard http/https proxy. +3129 is intended for use as an http transparent proxy +3130 is intended for use as an https transparent proxy + +However it requires that we get ip tables to transparently change the destination traffic (the host in unaware) to connect to the transparent proxies. + +Configure iptables to Pid [Owner Match](https://www.frozentux.net/iptables-tutorial/iptables-tutorial.html#OWNERMATCH) vagrant so that all traffic within the VM get's transparently proxies to iisquid. + +Squid uses this certificate to create on the fly ssl certificates for any host signed by it's own CA. However, the ssl structure requires the connecting clients trusting the self-signed CA created for use with squid. (A couple countries are suggesting / requiring all their folks use this approach so the government can intercept and block) + +We need to find a way to install this ca into the OSes running on devices / virtual or otherwise, we want to test. + +Current target is windows, which looks straight forward. 
+ +Need to merge and get current to use compose etc: + +https://github.com/hh/squid-in-a-can/network + +And our approach to creating static binaries using tcl-container... I'm not sure of + +iptables modules no longer support owner +http://manpages.ubuntu.com/manpages/trusty/man8/iptables-extensions.8.html diff --git a/blog/content/blog/reducing-chef-client-load-time-on-windows.md b/blog/content/blog/reducing-chef-client-load-time-on-windows.md new file mode 100644 index 0000000..f8dce6d --- /dev/null +++ b/blog/content/blog/reducing-chef-client-load-time-on-windows.md @@ -0,0 +1,10 @@ ++++ +title = "Reducing chef-client Load Time on Windows" +author = ["Hippie Hacker"] +date = 2015-12-18 +lastmod = "Fri Dec 18 07:32:13 NZDT 2015" +draft = true ++++ + + +http://code.gnslngr.us/?p=73 diff --git a/blog/content/blog/rerouting-container-registries-with-envoy.md b/blog/content/blog/rerouting-container-registries-with-envoy.md new file mode 100644 index 0000000..bfd226e --- /dev/null +++ b/blog/content/blog/rerouting-container-registries-with-envoy.md @@ -0,0 +1,192 @@ ++++ +title = "Rerouting Container Registries With Envoy" +author = ["Caleb Woodbine"] +date = 2021-04-15 +lastmod = 2021-04-21T15:44:52+12:00 +tags = ["envoy", "oci", "containers", "discoveries"] +categories = ["discoveries"] +draft = false +weight = 2001 +summary = "Share the traffic across many container registries with Envoy" ++++ + +## Introduction {#introduction} + +In this post, I will detail the discovery of Envoy's dynamic rewriting location capabilities and the relationship to OCI registries. + +****What is [Envoy](https://www.envoyproxy.io/)?**** + +> open source edge and service proxy, designed for cloud-native applications + +****What is an [OCI container registry](https://opencontainers.org/)?**** + +> a standard, and specification, for the implementation of container registries + +I've been playing around with and learning Envoy for a number of months now. 
One of the concepts I'm investigating is rewriting the request's host. +Envoy is a super powerful piece of software. It is flexible and highly dynamic. + + +## Journey {#journey} + + +### My expectations {#my-expectations} + +The goal is to set up Envoy on a host to rewrite all requests dynamically back to a container registry hosted by a cloud-provider, such as GCP. + + +### Initial discoveries {#initial-discoveries} + +One of the first things I investigated was the ability to get traffic from one site and serve it on another (proxying). +I searched in the docs and in their [most basic example](https://www.envoyproxy.io/docs/envoy/v1.17.1/start/quick-start/configuration-static) could see that, by using envoy's http filter in the filter\_chains, a static host could be rewritten. + +Example: + +```yaml +... +static_resources: + listeners: + - name: listener_0 + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + access_log: + - name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + http_filters: + - name: envoy.filters.http.router + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + route: + host_rewrite_literal: www.envoyproxy.io + cluster: service_envoyproxy_io +... +``` + +This is a great start! This serves the site and its content under the host where Envoy is served. +However, the host in the rewrite is static and not dynamic. It seems at this point like doing the implementation this way is not viable. 
+ + +### Learning about filter-chains {#learning-about-filter-chains} + +Envoy has the lovely feature to set many kinds of middleware in the middle of a request. +This middleware can be used to add/change/remove things from the request. +Envoy is particularly good at HTTP related filtering. It also supports such features as dynamic forward proxy, JWT auth, health checks, and rate limiting. + +The functionality is infinitely useful as filters can be such things as gRPC, PostgreSQL, Wasm, and even Lua. + + +### The implementation {#the-implementation} + +Once I found the ability to write Lua as a filter, I found that it provided enough capability to perform the dynamic host rewrite. + +```yaml +static_resources: + listeners: + - name: main + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + local reg1 = "k8s.gcr.io" + local reg2 = "registry-1.docker.io" + local reg2WithIP = "192.168.0.1" + function envoy_on_request(request_handle) + local reg = reg1 + remoteAddr = request_handle:headers():get("x-real-ip") + if remoteAddr == reg2WithIP then + request_handle:logInfo("remoteAddr: "..reg2WithIP) + reg = reg2 + end + if request_handle:headers():get(":method") == "GET" then + request_handle:respond( + { + [":status"] = "302", + ["location"] = "https://"..reg..request_handle:headers():get(":path"), + ["Content-Type"] = "text/html; charset=utf-8", + [":authority"] = "web_service" + }, + 
''.."302"..".\n") + end + end + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ii.coop + port_value: 443 +``` + +With envoy running this config, the behaviour of the requests is + +- rewrite all traffic hitting the web service to _k8s.gcr.io_ +- except if the IP is _192.168.0.1_ then set the location to _registry-1.docker.io_ + +Since I'm using a [Pair](https://github.com/sharingio/pair) instance, it sets the local subnet to _192.168.0.0/24_ so when I try to `docker pull humacs-envoy-10000.$SHARINGIO_PAIR_BASE_DNS_NAME/library/postgres:12-alpine` it will go to _docker.io_. + +On my local machine, pulling container images using `docker pull humacs-envoy-10000.$SHARINGIO_PAIR_BASE_DNS_NAME/e2e-test-images/agnhost:2.26` will instead use _k8s.gcr.io_. + +To achieve this, I research how other http libraries handle redirects - namely [Golang's net/http.Redirect](https://golang.org/src/net/http/server.go?s=66471:66536#L2179). +The main things that Golang's _http.Redirect_ does is: + +- set the _content-type_ header to _text/html_ +- set the location to the destination +- set the status code to 302 +- set the body to the same data in earlier steps, but in an _a_ tag. + + +## Final thoughts {#final-thoughts} + +I'm learning that Envoy is highly flexible and seemly limitless in it's capabilities. + +It's exciting to see Envoy being adopted in so many places - moreover to see the diverse usecases and implementations. + +Big shout out to [Zach](https://ii.coop/author/zach-mandeville) for pairing on this with a few different aspects and attempts! 
(Zach is cool:tm:) diff --git a/blog/content/blog/rpi-zero-gadget-support.md b/blog/content/blog/rpi-zero-gadget-support.md new file mode 100644 index 0000000..c76d359 --- /dev/null +++ b/blog/content/blog/rpi-zero-gadget-support.md @@ -0,0 +1,34 @@ ++++ +title = "RPi Zero Gadget Support" +date = 2015-12-06 +author = ["Hippie Hacker"] +lastmod = "Sun Dec 06 00:09:45 NZDT 2015" +summary = "For most OTG supported ports, it depends on what you plug into it that decides if the port is in host mode or gadget mode. We just need to get confirmation that the RPi zero port is wired the same way for the port that has it's usb data pins connected (not the one dedicated to power)." ++++ + + +For most OTG supported ports, it depends on what you plug into it that decides if the port is in host mode or gadget mode. We just need to get confirmation that the RPi zero port is wired the same way for the port that has it's usb data pins connected (not the one dedicated to power). + +["A device with a micro-A plug inserted becomes an OTG A-device, and a device with a micro-B plug inserted becomes a B-device. The type of plug inserted is detected by the state of the pin ID ."](https://en.wikipedia.org/wiki/USB_On-The-Go#OTG_micro_plugs) + +The RPi zero USB_OTGID pin should be grounded by the cable when using a micro-A / OTG cable and is hopefully left floating otherwise. That way we can the more common micro-B cable for providing power and connecting the usb data pins to a computer. 
+ +Here is a RPi zero [mechanical diagram](https://www.raspberrypi.org/documentation/hardware/raspberrypi/mechanical/rpi-zero-v1_2_dimensions.pdf), that shows the two usb micro ports for the RPi zero in bottom right: + +![image](https://cloud.githubusercontent.com/assets/31331/11607199/43a2a708-9b0b-11e5-8d98-518769d4df19.png) + +I couldn't find wiring schematics for RPi zero, so I pulled these from the [RPi A](https://www.raspberrypi.org/wp-content/uploads/2012/10/Raspberry-Pi-R2.0-Schematics-Issue2.2_027.pdf) + +This is the usb micro port used for power: (no data pins) + +![image](https://cloud.githubusercontent.com/assets/31331/11607210/b6d0f310-9b0b-11e5-97fb-1de5d360647c.png) + +This is the RPi-A USB-A port (note that the USB_OTGID pin on the usb controller is grounded). On the RPi zero, this connector is a micro-b port, and shouldn't have the USB_OTGID pin grounded, as that is usually done by the cable to distinguish between OTG/Host mode and usb gadget mode. + +![image](https://cloud.githubusercontent.com/assets/31331/11607223/fecac092-9b0b-11e5-8bcf-1d3f2b8e107b.png) + +I suspect that if we connect a normal [usb A to Micro-B cable](https://en.wikipedia.org/wiki/USB#Cable_plugs_.28USB_1.x.2F2.0.29) that doesn't ground out the USB_OTGID pin on the RPi zero, that we can accomplish the correct physical connections without any modifications, but we may need the kernel changes mentioned in this [comment on #881]( https://github.com/raspberrypi/linux/issues/881#issuecomment-161411866) + +Having a $5 usb gadget that could function as a combination of anything in https://github.com/torvalds/linux/tree/master/drivers/usb/gadget/function would be pretty grand. + +I've opened a ticket for PRi linux kernel to [discuss this](https://github.com/raspberrypi/linux/issues/1212). 
diff --git a/blog/content/blog/sharing-values+feelings-and-spending-time+$-together.md b/blog/content/blog/sharing-values+feelings-and-spending-time+$-together.md new file mode 100644 index 0000000..60b946c --- /dev/null +++ b/blog/content/blog/sharing-values+feelings-and-spending-time+$-together.md @@ -0,0 +1,36 @@ ++++ +title = "sharing values+feelings and spending time+$ together" +date = 2016-09-12 +author = ["Hippie Hacker"] +lastmod = "Mon Sep 12 12:39:07 NZST 2016" +summary = "Friday, ii hosted the first #chilliforchange and spent some time asking our community to finish a few sentences: I want x, I value y, I feel z..." ++++ + + +Friday ii hosted the first [#chilli4change](https://twitter.com/hippiehacker/status/773276669020778497) and spent some time asking our community to finish a few sentences: + +``` +i want X +i value Y +i feel Z +``` + +To give a relavant to the room example, I proposed the following: + +``` +i want to eat Mexican food next time we talk about social impact +i value cumin, jalapenos, and innovation +i feel healthy and spicy when I discuss hot topics over food +``` + +We talked about the Maori parable behind _te rourou_: **With your basket and my basket the people will live** was brought to life via a _shared community food basket program_, where we asked our friends to give us two baskets and funds to fill them with fresh produce. One for each participant to keep, and another to give away. + +_Recipients of the free baskets wondered why someone would do such a thing?_ They would ask questions about what we were doing, and some even joined us. But the major impact came as feelings within those who gave. **Experiencing generosity on a face to face level, without a third party mediating.** Forcing those 'Tena Koe', 'I see you', and 'I accept you as you are' moments. 
+ +We want to do the same in face to face discussions over hot meals, allowing us to create buckets together for meaningful purposes and allowing each of us to fund them if we share the same wants, values, and feelings. + +We gave everyone at the event a special invite to create shared baskets and spend the koha they gave towards the next event or elsewhere. For now ii will steward the funds in physical envelopes. + +[See how it works](https://docs.google.com/presentation/d/1ZQYKxhHwKuQGmOMPpoE8Eo0XMuw1yn55Bjgsh6-D0eQ/present?slide=id.p) + +We'll keep you posted as we see what buckets the attendees create and fund. We hope to find a way to easily invite others to participate. For now just drop by 148 Durham St for a coffee and a chat and we will get you connected for the next [#chilli4change](https://twitter.com/hippiehacker/status/773276669020778497) diff --git a/blog/content/blog/ssl-certificate-bindings-on-windows-with-chef.md b/blog/content/blog/ssl-certificate-bindings-on-windows-with-chef.md new file mode 100644 index 0000000..b9a563b --- /dev/null +++ b/blog/content/blog/ssl-certificate-bindings-on-windows-with-chef.md @@ -0,0 +1,72 @@ ++++ +title = "SSL Certificate Bindings on Windows with Chef" +date = 2016-09-06 +author = ["Hippie Hacker"] +lastmod = "Tue Sep 06 11:42:02 NZST 2016" +summary = "Enusring ssl certificates on windows are installed correctly." ++++ + + +I recently needed to ensure some ssl certificates on windows installed correctly. 
I opened an issue at [chef-cookbook/windows#313](https://github.com/chef-cookbooks/windows/issues/313) but the gist of it is here: + +```feature +As a windows chef user +I want to ensure a specific certificate binding to a port +In order to replace any existing binding with what I have specified + +Given a certificate in pfx form +And it's successfully imported +When I write a windows_certificate_binding resource stanza +And specify the desired subject or fingerprint +And there is already another certificate bound to the desired port +Then the desired certificate binding should replace the existing one +``` + +What you currently have to do (using an encrypted data bag with password, subject and fingerpint, and a files/default/certificate.pfx): + +```ruby +iis_site 'Default Web Site' do + action :config + site_id 1 + bindings 'http/*:80:,net.tcp/808:*,net.pipe/*,net.msmq/localhost,msmq.formatname/localhost,https/*:443:' +end + +decrypted = data_bag_item('passwords', "certificate") + +pfx = "c:\\chef\\certificate.pfx" + +cookbook_file pfx + +windows_certificate pfx do + pfx_password decrypted['password'] + store_name 'MY' + user_store false +end + +subject = decrypted['subject'] +fingerprint = decrypted['fingerprint'] + +#removing the current one IF it doesn't match +windows_certificate_binding 'Unbind any non-matching certs' do + action :delete + name subject + name_kind :subject + address '0.0.0.0' + guard_interpreter :powershell_script + not_if <<-EOF + Import-Module WebAdministration + $x = Git-Item IIS:\SslBindings\0.0.0.0!443 + $x.Thumbprint.CompareTo("#{fingerprint}") + EOF +end + +# bind the correct one... this should be all we need to specify... +# if there is already a binding on this port... 
it does nothing +# it should replace it in my opinion +windows_certificate_binding 'Reuse RDP and WINRM self-signed cert for IIS' do + action :create + name_kind :subject + name subject + address '0.0.0.0' +end +``` diff --git a/blog/content/blog/using-cookbook_file-per-node-on-windows.md b/blog/content/blog/using-cookbook_file-per-node-on-windows.md new file mode 100644 index 0000000..e3d1c1e --- /dev/null +++ b/blog/content/blog/using-cookbook_file-per-node-on-windows.md @@ -0,0 +1,64 @@ ++++ +title = "Using cookbook_file per Node on Windows" +date = 2016-09-06 +author = ["Hippie Hacker"] +lastmod = "Tue Sep 06 11:43:03 NZST 2016" +summary = "I needed to deploy a different node specific license file to our windows hosts so I wrote a cookbook_file resource..." ++++ + + +I needed to deploy a different node specific license file to our windows hosts so I wrote a [cookbook_file](https://docs.chef.io/resource_cookbook_file.html) resource that looked something like this: + +```ruby +cookbook_file 'C:\\Program Files (x86)\\vendor\\node.license' +``` + +And using the [file-specificity overhaul](https://github.com/chef/chef-rfc/blob/master/rfc017-file-specificity.md) I expected to be able to create a directory for each host under ```our-cookbook/files/host-NODENAME/our.license``` and have that be the file for that specific node. + +``` +our-cookbook $ tree files/host-nodes* +files/host-nodename1 +└── node.license +files/host-nodename2 +└── node.license +files/host-nodename3 +└── node.license +files/host-nodename4 +└── node.license +``` + +It took me a while to understand my failed assumptions. + +_NODENAME isn't the same as FQDN_ + +It usually is, but on windows ec2 instances they often differ. 
+ +Ec2Config service is configured by default on many windows AMIs to [reset computer name on next boot](http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/UsingConfig_WinAMI.html#UsingConfigInterface_WinAMI) which results in new machines geting renamed to ```ip-IP_INHEX``` where IP_INHEX is the hex representation of the internal ip. + +``` +$ knife winrm -m $IP 'ohai' | grep fqdn +10.113.6.171 "fqdn": "ip-0A7106AB", +``` + +When we bootstrap an ec2 ami and give it an ec2 instance name and nodename for chef, the fdqn/hostname is often left as the default: + +``` +$ knife search node *:$NODE_NAME +1 items found + +Node Name: $NODE_NAME +Environment: OURENV +FQDN: ip-0A7106AB +IP: 10.113.6.171 +... +Platform: windows 6.3.9600 +``` + +The fix for getting our license files into place based on the chef nodename was to add a ```source``` parameter to our resource based on the ```node.name``` + +```ruby +cookbook_file 'C:\\Program Files (x86)\\vendor\\node.license'' do + # normally this is host-#{node['fdqn']} and on aws/windows than ip-HEXNUMBR + source "host-#{node.name}/node.license'" +end +``` diff --git a/blog/content/blog/xds-conformance-project.md b/blog/content/blog/xds-conformance-project.md new file mode 100644 index 0000000..d1a0713 --- /dev/null +++ b/blog/content/blog/xds-conformance-project.md @@ -0,0 +1,110 @@ ++++ +title = "xDS Conformance Project" +date = 2021-12-09 +author = ["Zach Mandeville"] +lastmod = "Fri Dec 09 00:09:49 NZDT 2021" +summary = "A look into the work ii has done on the xdS conformance test framework" ++++ + + +# Introduction + +I've had the privilege to spend the majority of this year working to develop an [xDS conformance test suite](https://github.com/ii/xds-test-harness). While this project is most definitely still in its infancy, the work is already rewarding and points to tremendous potential. 
Today, I wanted to introduce the conformance project and its goals, the ways we've approached the project, and the exciting progress made so far. + +# What is xDS? + +xDS, or extensible discovery service, is a set of APIs pioneered by [Envoy](https://envoyproxy.io). They allow for what is described as a [“Universal Data Plane API”](https://blog.envoyproxy.io/the-universal-data-plane-api-d15cec7a). In the example of Envoy, xDS allows a group to build their own control plane that, as long as it implements the relatively basic set of discovery services, can dynamically update all aspects of their Envoy service mesh. It is an idea that allows for a great amount of freedom and variety when working with Envoy, but has use cases beyond this proxy. + +For this reason, the conformance project is not a test suite for Envoy, but for the xDS protocol itself, specifically the transport layer. It is a set of testable behaviours for how resources are exchanged in an xDS api, without specifying the kinds of resources or the context in which they're exchanged. This will let us set up a rich, consistent definition of xDS so it can continue to extend, adapt, and grow to its full potential. + +# Some initial technical goals + +The conformance framework is intended as a single binary, and set of tests, that can be easily downloaded and run against someone’s xDS implementation. We decided to write the framework in Go due to its popularity in the community, the simplicity of its language, and its superb support for concurrency– as concurrency was central to the planned design of the framework. + +One of the goals of this project was to help build understanding of the protocol itself, to establish for the first time what ’conformant behaviour’ means within xDS. These APIs are young, and handle a lot of complex tasks within an already complex domain. 
While the documentation for Envoy is great, and growing, there is still a lot of undocumented behaviour, where the only way to understand how something *should* be working is to deduce it from the source code. From the beginning, we wanted both the framework and tests to be written as simply as possible, so they could articulate conformance as clearly and plainly as possible. + +# Our Test case syntax + +For testing, it is hard to find a more plain-spoken and articulate syntax than [Gherkin](https://cucumber.io/docs/gherkin/reference/), which is why we chose it for the conformance framework. Gherkin uses natural language, plus a small amount of keywords, so that the tests act as a shared language between stakeholders and developers. The first stage of the project was just to come up with a set of sample tests, just through meetings and a shared google doc. It made me quite happy to be able to take these sample test cases and transpose them, nearly verbatim, into executable tests. + +Gherkin is only a testing syntax and does not handle the testing functionality. For that, we use the [Godog library](https://github.com/cucumber/godog). Godog converts Gherkin into regex patterns that map to functions. One of the awesome benefits of this is that, if you are intentional and clever with how you write test cases, you can re-use functions across tests. This minimizes the amount of code you must write and maintain, while providing a consistent rhythm and readability to the tests. + +## An example + +For example, one of the behaviours we wanted to test is wildcard subscriptions. With xDS generally, a client will request specific resources from a server, and if those resources exist, they should be included in the server's response. However, the listener and cluster discovery services (LDS and CDS, respectively) allow for wildcard subscriptions, where a client can receive all resources for the service without having to specifically name them in their subscription request. 
+ +In the original test case document, this scenario was described as so: + +> Server has resources A, B, and C. Client subscribes to wildcard. Server should send a response containing all three resources with some initial version and a nonce. Client sends an ACK with that version and nonce. + +This was translated into a test case as: + + Scenario Outline: The service should send all resources on a wildcard request. + Given a target setup with , , and + When the Client does a wildcard subscription to + Then the Client receives the and + And the Client sends an ACK to which the does not respond + +Each line then maps to a function in our test runner. The `` in the above lines represent example terms, which lets us provide an example table to this test. Godogs will run the test for each example given in the table, replacing each `` with the row's respective column. + +In other words, we can run the same test for two different services by simply adding this table beneath the test: + + Examples: + | service | starting version | resources | expected resources | + | "CDS" | "1" | "A,B,C" | "C,A,B" | + | "LDS" | "1" | "D,E,F" | "F,D,E" | + + +After testing wildcard subscriptions, we wanted to test subscription updates. If a client does a wildcard subscription, and any of the service resources change, then the server should send an updated response, without prompting, to the client. + +This test, as described, follows a similar pattern as the first one: there is an initial state, actions occur, and they trigger expected responses. Since they share a pattern, the tests can share code. 
+ +We were able to write this second test as: + + Scenario Outline: The service should send updates to the client + Given a target setup with , , and + When the Client does a wildcard subscription to + Then the Client receives the and + When a of the is updated to the + Then the Client receives the and + And the Client sends an ACK to which the does not respond + +This test covers new behaviour across two different services, and only requires a single new function to be written (the second WHEN step). As the test framework evolves, we are finding it easier to write tests without writing any new code at all. + + +# Setting up the test runner + +While the tests are simple, and hopefully straightforward, it took a decent amount of work to get here. The first difficulty was that we needed a test runner that could adapt itself to each service, while being able to use the same function. Secondly, we needed a way to write linear tests to describe interactions that were not necessarily linear. + +The envoy xDS APIs are built with [gRPC](https://grpc.io/) using bidirectional streams. Not every request from an xDS client should get a response from the server, and the server should send certain responses to the client before they know to request them. The above test is an example of this, where an update to the state should cause the server to send a response without the client's prompting. + +We needed a way to start a service stream and investigate the various calls and responses through the changing state of the entire instance. We did this by using the concurrent patterns built in to go and through designing a service interface. + +Without going too heavily into code, the essential pattern for each test is this: In the beginning GIVEN step we setup the target server using an integrated adapter. In the first WHEN step we initiate an interface for the specified service. 
This interface is built with a set of channels, a cache for the requests and responses sent along these channels, and functions for managing the stream. + +We start the stream and set up a couple concurrent go routines, initiated with the service channels. These routines listen for new messages and pass them along the stream as needed, and send any responses or any errors back along their respective channels. + +In this way, we can adjust the state of the instance and the action of the client while maintaining an uninterrupted stream. It also allows us to observe all meaningful responses and errors that happen during the lifecycle of the stream. In each of the THEN steps, we use the cache of responses and errors to validate the behaviour and determine whether the test passed. + +This pattern allows for us to linearly describe non-linear behaviour, while the Service interface lets us use the same function across services. + + +# Integrating the suite into an xDS implementation + +Now that we had the design, we needed to verify that it could work with an actual target. A good use case for the xDS conformance suite would be to test a custom control plane implementation, to verify that its behaviour is consistent with any other Envoy control plane. And so, we built an implementation of the [go-control-plane](https://github.com/envoyproxy/go-control-plane), and integrated the framework’s adapter for it. + +Basing our implementation off the awesome example server included in the go-control-plane repo (and their integration tests) we were able to build our own example server to test the framework against. + +An important aspect of the framework is that it needs an adapter API to communicate state changes to the target server outside of the communication happening in the xDS services. I was quite happy when we were able to include our adapter into our go-control-plane implementation with only a few lines of code. 
+ + +# Collaborative work + +We ran our framework against the example target and found that the majority of our tests passed and the ones that did not were highlighting behaviour already described in open issues in the go-control-plane repo. The framework, even in its prototype state, was working as we roughly expected. + +More importantly, though, this framework was helping us articulate behaviour that hadn’t been documented much outside of github issues. We’ve begun to collaborate with the maintainers of the go-control-plane so that their expertise can help strengthen our framework and its adapter, while our tests are helping define and strengthen the behaviour of this control plane. It has been a delight to collaborate on this project, where the work is simultaneously exploratory and concrete, and could help lead to improvements across multiple projects and domains. + + +# Where we go from here + +The framework is still in its earliest stages. We are still implementing the basic tests for the SoTW protocol before moving to more complex behaviours in the newer Delta protocol. I am excited to build out these tests and to run them against an increasing variety of example targets. It is exciting to see the beginnings of the framework used alongside control plane development, to help illuminate and explain the xDS protocol and to ensure our implementations are as strong as they can be! diff --git a/blog/content/pizza.md b/blog/content/pizza.md new file mode 100644 index 0000000..4fc403d --- /dev/null +++ b/blog/content/pizza.md @@ -0,0 +1,8 @@ ++++ +title = "Pizza?" ++++ + +not good for you! OK?! 
+ +MAGIC + diff --git a/blog/org/.auctex-auto/pairing-using-pair.el b/blog/org/.auctex-auto/pairing-using-pair.el new file mode 100644 index 0000000..4b594db --- /dev/null +++ b/blog/org/.auctex-auto/pairing-using-pair.el @@ -0,0 +1,39 @@ +(TeX-add-style-hook + "pairing-using-pair" + (lambda () + (TeX-add-to-alist 'LaTeX-provided-class-options + '(("article" "11pt"))) + (TeX-add-to-alist 'LaTeX-provided-package-options + '(("inputenc" "utf8") ("fontenc" "T1") ("ulem" "normalem"))) + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "href") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperref") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperimage") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "hyperbaseurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "nolinkurl") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "url") + (add-to-list 'LaTeX-verbatim-macros-with-braces-local "path") + (add-to-list 'LaTeX-verbatim-macros-with-delims-local "path") + (TeX-run-style-hooks + "latex2e" + "article" + "art11" + "inputenc" + "fontenc" + "graphicx" + "grffile" + "longtable" + "wrapfig" + "rotating" + "ulem" + "amsmath" + "textcomp" + "amssymb" + "capt-of" + "hyperref") + (LaTeX-add-labels + "sec:orga08efcd" + "sec:orge8589e1" + "sec:org9d02c68" + "sec:orgc18444e")) + :latex) + diff --git a/blog/org/blogging-with-org.org b/blog/org/blogging-with-org.org new file mode 100644 index 0000000..80b1666 --- /dev/null +++ b/blog/org/blogging-with-org.org @@ -0,0 +1,770 @@ +#+HUGO_BASE_DIR: ../ +#+HUGO_SECTION: ./posts +#+HUGO_WEIGHT: auto +#+HUGO_AUTO_SET_LASTMOD: t + +* Org-mode :@org: +** Choosing how to blog with ii :meta: +:PROPERTIES: +:EXPORT_FILE_NAME: posts/choosing-our-blog +:EXPORT_DATE: 2021-02-03 +:EXPORT_HUGO_MENU: :menu "main" +:EXPORT_HUGO_CUSTOM_FRONT_MATTER: :summary "How we are deciding the blogging format to use, and why." 
+:END: +We want to have a blogging format that fits with our existing org files, or offers a big enough benefit to adjust how we write these org files. +*** Needs +- brings over all the major org syntax + + code blocks + + result blocks + + tags + + links between files +*** Options +**** Firn +**** ox-hugo and Hugo +**** Pandoc and Shell Scripts +**** Gatsby and Orga +** trying out include +:PROPERTIES: +:EXPORT_FILE_NAME: test +:EXPORT_DATE: 2021-02-03 +:EXPORT_HUGO_MENU: :menu "main" +:EXPORT_HUGO_CUSTOM_FRONT_MATTER: :summary "How we are deciding the blogging format to use, and why." +:END: +#+include: "./test.org" export org-mode +* Guides :@guides: +** Deploying Talos to Equinix :kubernetes:equinix:talos: +:PROPERTIES: +:EXPORT_FILE_NAME: deplying-talos-to-equinix +:EXPORT_DATE: 2021-02-03 +:EXPORT_HUGO_MENU: :menu "main" +:EXPORT_HUGO_CUSTOM_FRONT_MATTER: :summary "How we are deciding the blogging format to use, and why." +:END: +*** Introduction + +In this guide we will launch a highly-available three Node Kubernetes cluster on Equinix Metal using Talos as the Node OS, as well as bootstrap, and controlPlane provider for Cluster-API. + +- What is [[https://cluster-api.sigs.k8s.io/][Cluster-API]]? :: +#+begin_quote +Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters. +#+end_quote + +- What is [[https://www.talos.dev/][Talos]]? :: +#+begin_quote +Talos is a modern OS designed to be secure, immutable, and minimal. +#+end_quote + +- What is [[https://metal.equinix.com/][Equinix Metal]]? :: +#+begin_quote +A globally-available bare metal “as-a-service” that can be deployed and interconnected in minutes. +#+end_quote +The folks over at Equinix Metal have a wonderful heart for supporting Open Source communities. + +- Why is this important? 
:: In general: Orchestrating a container based OS such as Talos ([[http://flatcar-linux.org/][Flatcar]], [[https://getfedora.org/coreos/][Fedora CoreOS]], or [[https://rancher.com/products/rancher/][RancherOS]]) shifts focus from the Nodes to the workloads. In terms of Talos: Currently the documentation for running an OS such as Talos in Equinix Metal for Kubernetes with Cluster-API is not so well documented and therefore inaccessible. It's important to fill in the gaps of knowledge. + +*** Dependencies + +What you'll need for this guide: + +- [[https://github.com/talos-systems/talos/releases/tag/v0.8.1][talosctl]] + +- [[https://kubernetes.io/docs/tasks/tools/install-kubectl/][kubectl]] + +- [[https://github.com/packethost/packet-cli][packet-cli]] + +- the ID and API token of existing Equinix Metal project + +- an existing Kubernetes cluster with a public IP (such as [[http://kind.sigs.k8s.io/][kind]], [[https://minikube.sigs.k8s.io/][minikube]], or a cluster already on Equinix Metal) + +*** Preliminary steps + +In order to talk to Equinix Metal, we'll export environment variables to configure resources and talk via ~packet-cli~. + +Set the correct project to create and manage resources in: +#+begin_src tmate + read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID +#+end_src + +The API key for your account or project: +#+begin_src tmate + read -p 'PACKET_API_KEY: ' PACKET_API_KEY +#+end_src + +Export the variables to be accessible from ~packet-cli~ and ~clusterctl~ later on: +#+begin_src tmate + export PACKET_PROJECT_ID PACKET_API_KEY PACKET_TOKEN=$PACKET_API_KEY +#+end_src + +In the existing cluster, a public LoadBalancer IP will be needed. I have already installed nginx-ingress in this cluster, which has got a Service with the cluster's elastic IP. +We'll need this IP address later for use in booting the servers. +If you have set up your existing cluster differently, it'll just need to be an IP that we can use. 
+#+begin_src tmate + export LOAD_BALANCER_IP="$(kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')" +#+end_src + +*** Setting up Cluster-API + +Install Talos providers for Cluster-API bootstrap and controlplane in your existing cluster: +#+begin_src tmate + clusterctl init -b talos -c talos -i packet +#+end_src + +This will install Talos's bootstrap and controlPlane controllers as well as the Packet / Equinix Metal infrastructure provider. + +**Important** note: +- the ~bootstrap-talos~ controller in the ~cabpt-system~ namespace must be running a version greater than ~v0.2.0-alpha.8~. The version can be displayed in with ~clusterctl upgrade plan~ when it's installed. + +*** Setting up Matchbox + +Currently, since Equinix Metal have **not** yet added support for Talos, it is necessary to install [[https://matchbox.psdn.io/][Matchbox]] to boot the servers (There is an [[https://github.com/packethost/packet-images/issues/26][issue]] in progress and [[https://feedback.equinixmetal.com/operating-systems/p/talos-as-officially-supported-operating-system][feedback]] for adding support). + +- What is Matchbox? :: +#+begin_quote +Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. 
+#+end_quote + +Here is the manifest for a basic matchbox installation: +#+begin_src yaml :tangle ./matchbox.yaml :comments none + apiVersion: apps/v1 + kind: Deployment + metadata: + name: matchbox + spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + name: matchbox + template: + metadata: + labels: + name: matchbox + spec: + containers: + - name: matchbox + image: quay.io/poseidon/matchbox:v0.9.0 + env: + - name: MATCHBOX_ADDRESS + value: "0.0.0.0:8080" + - name: MATCHBOX_LOG_LEVEL + value: "debug" + ports: + - name: http + containerPort: 8080 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: 8080 + resources: + requests: + cpu: 30m + memory: 20Mi + limits: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: data + mountPath: /var/lib/matchbox + - name: assets + mountPath: /var/lib/matchbox/assets + volumes: + - name: data + hostPath: + path: /var/local/matchbox/data + - name: assets + hostPath: + path: /var/local/matchbox/assets + --- + apiVersion: v1 + kind: Service + metadata: + name: matchbox + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + spec: + type: LoadBalancer + selector: + name: matchbox + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 +#+end_src +Save it as ~matchbox.yaml~ + +The manifests above were inspired by the manifests in the [[https://github.com/poseidon/matchbox/tree/master/contrib/k8s][matchbox repo]]. 
+For production it might be wise to use: +- an Ingress with full TLS +- a ReadWriteMany storage provider instead of hostPath for scaling + +With the manifests ready to go, we'll install Matchbox into the ~matchbox~ namespace on the existing cluster with the following commands: +#+begin_src tmate + kubectl create ns matchbox + kubectl -n matchbox apply -f ./matchbox.yaml +#+end_src + +You may need to patch the ~Service.spec.externalIPs~ to have an IP to access it from if one is not populated: +#+begin_src tmate + kubectl -n matchbox patch \ + service matchbox \ + -p "{\"spec\":{\"externalIPs\":[\"$LOAD_BALANCER_IP\"]}}" +#+end_src + +Once the pod is live, we'll need to create a directory structure for storing Talos boot assets: +#+begin_src tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + mkdir -p /var/lib/matchbox/{profiles,groups} /var/lib/matchbox/assets/talos +#+end_src + +Inside the Matchbox container, we'll download the Talos boot assets for Talos version 0.8.1 into the assets folder: +#+begin_src tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + wget -P /var/lib/matchbox/assets/talos \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/initramfs-amd64.xz \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/vmlinuz-amd64 +#+end_src + +Now that the assets have been downloaded, run a checksum against them to verify: +#+begin_src tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c "cd /var/lib/matchbox/assets/talos && \ + wget -O- https://github.com/talos-systems/talos/releases/download/v0.8.1/sha512sum.txt 2> /dev/null \ + | sed 's,_out/,,g' \ + | grep 'initramfs-amd64.xz\|vmlinuz-amd64' \ + | sha512sum -c -" +#+end_src + +Since there's only one Pod in the Matchbox deployment, we'll export its name to copy files into it: +#+begin_src tmate + export MATCHBOX_POD_NAME=$(kubectl -n matchbox get pods -l name=matchbox -o=jsonpath='{.items[0].metadata.name}') +#+end_src + 
+[[https://matchbox.psdn.io/matchbox/#profiles][Profiles in Matchbox]] are JSON configurations for how the servers should boot, where from, and their kernel args. Save this file as ~profile-default-amd64.json~ +#+begin_src json :tangle ./profile-default-amd64.json :comments none + { + "id": "default-amd64", + "name": "default-amd64", + "boot": { + "kernel": "/assets/talos/vmlinuz-amd64", + "initrd": [ + "/assets/talos/initramfs-amd64.xz" + ], + "args": [ + "initrd=initramfs-amd64.xz", + "init_on_alloc=1", + "init_on_free=1", + "slub_debug=P", + "pti=on", + "random.trust_cpu=on", + "console=tty0", + "console=ttyS1,115200n8", + "slab_nomerge", + "printk.devkmsg=on", + "talos.platform=packet", + "talos.config=none" + ] + } + } +#+end_src + +[[https://matchbox.psdn.io/matchbox/#groups][Groups in Matchbox]] are a way of letting servers pick up profiles based on selectors. Save this file as ~group-default-amd64.json~ +#+begin_src json :tangle ./group-default-amd64.json :comments none + { + "id": "default-amd64", + "name": "default-amd64", + "profile": "default-amd64", + "selector": { + "arch": "amd64" + } + } +#+end_src + +We'll copy the profile and group into their respective folders: +#+begin_src tmate + kubectl -n matchbox \ + cp ./profile-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/profiles/default-amd64.json + kubectl -n matchbox \ + cp ./group-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/groups/default-amd64.json +#+end_src + +List the files to validate that they were written correctly: +#+begin_src tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c 'ls -alh /var/lib/matchbox/*/' +#+end_src + +**** Testing Matchbox + +Using ~curl~, we can verify Matchbox's running state: +#+begin_src tmate + curl http://$LOAD_BALANCER_IP:8080 +#+end_src + +To test matchbox, we'll create an invalid userdata configuration for Talos, saving as ~userdata.txt~: +#+begin_src text :tangle ./userdata.txt :comments none +#!talos 
+#+end_src +Feel free to use a valid one. + +Now let's talk to Equinix Metal to create a server pointing to the Matchbox server: +#+begin_src tmate + packet-cli device create \ + --hostname talos-pxe-boot-test-1 \ + --plan c1.small.x86 \ + --facility sjc1 \ + --operating-system custom_ipxe \ + --project-id "$PACKET_PROJECT_ID" \ + --ipxe-script-url "http://$LOAD_BALANCER_IP:8080/ipxe?arch=amd64" \ + --userdata-file=./userdata.txt +#+end_src + +In the meanwhile, we can watch the logs to see how things are: +#+begin_src tmate + kubectl -n matchbox logs deployment/matchbox -f --tail=100 +#+end_src + +Looking at the logs, there should be some get requests of resources that will be used to boot the OS. + +Notes: +- fun fact: you can run Matchbox on Android using [[https://f-droid.org/en/packages/com.termux/][Termux]]. + +*** The cluster + +**** Preparing the cluster + +Here we will declare the template that we will shortly generate our usable cluster from: +#+begin_src yaml :tangle ./talos-packet-cluster-template.yaml :comments none + kind: TalosControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + metadata: + name: "${CLUSTER_NAME}-control-plane" + spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - 
https://github.com/packethost/packet-ccm/releases/download/v1.1.0/deployment.yaml + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + metadata: + name: "${CLUSTER_NAME}-control-plane" + spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] + --- + apiVersion: cluster.x-k8s.io/v1alpha3 + kind: Cluster + metadata: + name: "${CLUSTER_NAME}" + spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: "${CLUSTER_NAME}-control-plane" + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + metadata: + name: "${CLUSTER_NAME}" + spec: + projectID: "${PACKET_PROJECT_ID}" + facility: "${FACILITY}" + --- + apiVersion: cluster.x-k8s.io/v1alpha3 + kind: MachineDeployment + metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + replicas: ${WORKER_MACHINE_COUNT} + 
clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + metadata: + name: ${CLUSTER_NAME}-worker-a + spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] + --- + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + spec: + template: + spec: + generateType: init +#+end_src + +Inside of ~TalosControlPlane.spec.controlPlaneConfig.init~, I'm very much liking the use of ~generateType: init~ paired with ~configPatches~. 
This enables: +- configuration to be generated; +- management of certificates out of the cluster operator's hands; +- another level of standardisation; and +- overrides to be added where needed + +Notes: +- the ClusterAPI template above uses Packet-Cloud-Controller manager version 1.1.0 + +***** Cluster name :noexport: +#+name: cluster-config-env-name +#+begin_src bash + export CLUSTER_NAME="talos-metal" +#+end_src + +***** Templating your configuration + +Set environment variables for configuration: +#+name: cluster-config-env +#+begin_src bash :noweb yes + <> + export FACILITY=sjc1 + export KUBERNETES_VERSION=v1.20.2 + export POD_CIDR=10.244.0.0/16 + export SERVICE_CIDR=10.96.0.0/12 + export CONTROLPLANE_NODE_TYPE=c1.small.x86 + export CONTROL_PLANE_MACHINE_COUNT=3 + export WORKER_NODE_TYPE=c1.small.x86 + export WORKER_MACHINE_COUNT=0 + export SSH_KEY="" + export IPXE_URL=$LOAD_BALANCER_IP +#+end_src + +In the variables above, we will create a cluster which has three small controlPlane nodes to run workloads. + +****** Apply the variables :noexport: + +Set the env in the tmate session: +#+begin_src tmate :noweb yes + <> +#+end_src + +***** Render the manifests +Render your cluster configuration from the template: +#+begin_src tmate :noweb yes + clusterctl config cluster "$CLUSTER_NAME" \ + --from ./talos-packet-cluster-template.yaml \ + -n "$CLUSTER_NAME" > "$CLUSTER_NAME"-cluster-capi.yaml +#+end_src + +**** Creating the cluster + +With the template for the cluster rendered to how we wish to deploy it, it's now time to apply it: +#+begin_src tmate + kubectl create ns "$CLUSTER_NAME" + kubectl -n "$CLUSTER_NAME" apply -f ./"$CLUSTER_NAME"-cluster-capi.yaml +#+end_src + +The cluster will now be brought up, we can see the progress by taking a look at the resources: +#+begin_src tmate + kubectl -n "$CLUSTER_NAME" get machines,clusters,packetmachines,packetclusters +#+end_src + +Note: As expected, the cluster may take some time to appear and be accessible. 
+ +Not long after applying, a KubeConfig is available. Fetch the KubeConfig from the existing cluster with: +#+begin_src tmate + kubectl -n "$CLUSTER_NAME" get secrets \ + "$CLUSTER_NAME"-kubeconfig -o=jsonpath='{.data.value}' \ + | base64 -d > $HOME/.kube/"$CLUSTER_NAME" +#+end_src + +Using the KubeConfig from the new cluster, check out the status of it: +#+begin_src tmate + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" cluster-info +#+end_src + +Once the APIServer is reachable, create configuration for how the Packet-Cloud-Controller-Manager should talk to Equinix-Metal: +#+begin_src tmate + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" -n kube-system \ + create secret generic packet-cloud-config \ + --from-literal=cloud-sa.json="{\"apiKey\": \"${PACKET_API_KEY}\",\"projectID\": \"${PACKET_PROJECT_ID}\"}" +#+end_src + +Since we're able to talk to the APIServer, we can check how all Pods are doing: +#+name: list all Pods +#+begin_src bash :noweb yes + <> + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME"\ + -n kube-system get pods +#+end_src + +Listing Pods shows that everything is live and in a good state: +#+RESULTS: list all Pods +#+begin_src bash +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-5b55f9f688-fb2cb 1/1 Running 0 25m +kube-system coredns-5b55f9f688-qsvg5 1/1 Running 0 25m +kube-system kube-apiserver-665px 1/1 Running 0 19m +kube-system kube-apiserver-mz68q 1/1 Running 0 19m +kube-system kube-apiserver-qfklt 1/1 Running 2 19m +kube-system kube-controller-manager-6grxd 1/1 Running 0 19m +kube-system kube-controller-manager-cf76h 1/1 Running 0 19m +kube-system kube-controller-manager-dsmgf 1/1 Running 0 19m +kube-system kube-flannel-brdxw 1/1 Running 0 24m +kube-system kube-flannel-dm85d 1/1 Running 0 24m +kube-system kube-flannel-sg6k9 1/1 Running 0 24m +kube-system kube-proxy-flx59 1/1 Running 0 24m +kube-system kube-proxy-gbn4l 1/1 Running 0 24m +kube-system kube-proxy-ns84v 1/1 Running 0 24m +kube-system kube-scheduler-4qhjw 
1/1 Running 0 19m +kube-system kube-scheduler-kbm5z 1/1 Running 0 19m +kube-system kube-scheduler-klsmp 1/1 Running 0 19m +kube-system packet-cloud-controller-manager-77cd8c9c7c-cdzfv 1/1 Running 0 20m +kube-system pod-checkpointer-4szh6 1/1 Running 0 19m +kube-system pod-checkpointer-4szh6-talos-metal-control-plane-j29lb 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h-talos-metal-control-plane-lk9f2 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh-talos-metal-control-plane-h9v4j 1/1 Running 0 19m +#+end_src + +With the cluster live, it's now ready for workloads to be deployed! + +*** Talos Configuration + +In order to manage Talos Nodes outside of Kubernetes, we need to create and set up configuration to use. + +Create the directory for the config: +#+begin_src tmate + mkdir -p $HOME/.talos +#+end_src + +Discover the IP for the first controlPlane: +#+begin_src tmate + export TALOS_ENDPOINT=$(kubectl -n "$CLUSTER_NAME" \ + get machines \ + $(kubectl -n "$CLUSTER_NAME" \ + get machines -l cluster.x-k8s.io/control-plane='' \ + --no-headers --output=jsonpath='{.items[0].metadata.name}') \ + -o=jsonpath="{.status.addresses[?(@.type=='ExternalIP')].address}" | awk '{print $2}') +#+end_src + +Fetch the ~talosconfig~ from the existing cluster: +#+begin_src tmate + kubectl get talosconfig \ + -n $CLUSTER_NAME \ + -l cluster.x-k8s.io/cluster-name=$CLUSTER_NAME \ + -o yaml -o jsonpath='{.items[0].status.talosConfig}' > $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml +#+end_src + +Write in the configuration the endpoint IP and node IP: +#+begin_src tmate + talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config endpoint $TALOS_ENDPOINT + talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config node $TALOS_ENDPOINT +#+end_src + +Now that the ~talosconfig~ 
has been written, try listing all containers: +#+name: list-containers-on-containerd +#+begin_src bash :noweb yes + <> + # removing ip; omit ` | sed ...` for regular use + talosctl --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml containers | sed -r 's/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b'/"x.x.x.x "/ +#+end_src + +Here's the containers running on this particular node, in containerd (not k8s related): +#+RESULTS: list-containers-on-containerd +#+begin_src bash +NODE NAMESPACE ID IMAGE PID STATUS +x.x.x.x system apid talos/apid 3046 RUNNING +x.x.x.x system etcd gcr.io/etcd-development/etcd:v3.4.14 3130 RUNNING +x.x.x.x system networkd talos/networkd 2879 RUNNING +x.x.x.x system routerd talos/routerd 2888 RUNNING +x.x.x.x system timed talos/timed 2976 RUNNING +x.x.x.x system trustd talos/trustd 3047 RUNNING +#+end_src + +*** Clean up + +Tearing down the entire cluster and resources associated with it, can be achieved by + +i. Deleting the cluster: +#+begin_src tmate + kubectl -n "$CLUSTER_NAME" delete cluster "$CLUSTER_NAME" +#+end_src + +ii. Deleting the namespace: +#+begin_src tmate + kubectl delete ns "$CLUSTER_NAME" +#+end_src + +iii. Removing local configurations: +#+begin_src tmate + rm \ + $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + $HOME/.kube/"$CLUSTER_NAME" +#+end_src + +*** What have I learned from this? +- (always learning) how wonderful the Kubernetes community is :: there are so many knowledgable individuals who are so ready for collaboration and adoption - it doesn't matter the SIG or group. +- how modular Cluster-API is :: Cluster-API components (bootstrap, controlPlane, core, infrastructure) can be swapped out and meshed together in very cool ways. + +*** Credits +Integrating Talos into this project would not be possible without help from [[https://github.com/andrewrynhard][Andrew Rynhard (Talos Systems)]], huge thanks to him for reaching out for pairing and co-authoring. 
+ +*** Notes and references +- with the new cluster's controlPlane live and available for deployment, the iPXE server could be moved into that cluster - meaning that new servers boot from the cluster that they'll join, making it almost self-contained +- cluster configuration as based off of [[https://github.com/kubernetes-sigs/cluster-api-provider-packet/blob/479faf06e1337b1e979cb624ca8be015b2a89cde/templates/cluster-template.yaml][cluster-template.yaml from the cluster-api-provider-packet repo]] +- this post has been made to [[https://blog.calebwoodbine.com/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal][blog.calebwoodine.com]], and [[https://ii.coop/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal/][talos-system.com/blog]], but is also available as an [[https://github.com/ii/org/blob/master/ii/equinix-metal-capi-talos-kubernetes/README.org][Org file]] + +----- + +Hope you've enjoyed the output of this project! +Thank you! + +*** Footnotes + +#+REVEAL_ROOT: https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.9.2 +#+NOREVEAL_ROOT: https://raw.githubusercontent.com/hakimel/reveal.js/3.9.2/ +# #+REVEAL_TITLE_SLIDE: +#+NOREVEAL_DEFAULT_FRAG_STYLE: YY +#+NOREVEAL_EXTRA_CSS: YY +#+NOREVEAL_EXTRA_JS: YY +#+REVEAL_HLEVEL: 2 +#+REVEAL_MARGIN: 0.1 +#+REVEAL_WIDTH: 1000 +#+REVEAL_HEIGHT: 600 +#+REVEAL_MAX_SCALE: 3.5 +#+REVEAL_MIN_SCALE: 0.2 +#+REVEAL_PLUGINS: (markdown notes highlight multiplex) +#+REVEAL_SLIDE_NUMBER: "" +#+REVEAL_SPEED: 1 +#+REVEAL_THEME: moon +#+REVEAL_THEME_OPTIONS: beige|black|blood|league|moon|night|serif|simple|sky|solarized|white +#+REVEAL_TRANS: cube +#+REVEAL_TRANS_OPTIONS: none|cube|fade|concave|convex|page|slide|zoom + +#+OPTIONS: num:nil +#+OPTIONS: toc:nil +#+OPTIONS: mathjax:Y +#+OPTIONS: reveal_single_file:nil +#+OPTIONS: reveal_control:t +#+OPTIONS: reveal-progress:t +#+OPTIONS: reveal_history:nil +#+OPTIONS: reveal_center:t +#+OPTIONS: reveal_rolling_links:nil +#+OPTIONS: reveal_keyboard:t +#+OPTIONS: 
reveal_overview:t +* Scratch* Footnotes diff --git a/blog/org/deploying-talos-to-equinix.org b/blog/org/deploying-talos-to-equinix.org new file mode 100644 index 0000000..b0b8cad --- /dev/null +++ b/blog/org/deploying-talos-to-equinix.org @@ -0,0 +1,743 @@ +#+HUGO_BASE_DIR: ../ +#+HUGO_SECTION: ./ +#+HUGO_WEIGHT: auto +#+HUGO_AUTO_SET_LASTMOD: t +#+TITLE: Cluster-API + Talos + Equinix Metal +#+AUTHOR: Caleb Woodbine +#+AUTHOR: Andrew Rynhard +#+DATE: 21st of January 2021 +#+DATE_CREATED: 2021-01-21 +#+DATE_UPDATED: 2021-02-02 +#+HUGO_TAGS: kubernetes talos equinix +#+HUGO_CATEGORIES: guides +#+FIRN_SUMMARY: A guide to launching a highly-available cluster with Equinix and Talos +#+PROPERTY: header-args:tmate+ :dir . :window capi-talos-metal + +* Introduction + +In this guide we will launch a highly-available three Node Kubernetes cluster on Equinix Metal using Talos as the Node OS, as well as bootstrap, and controlPlane provider for Cluster-API. + +- What is [[https://cluster-api.sigs.k8s.io/][Cluster-API]]? :: +#+begin_quote +Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters. +#+end_quote + +- What is [[https://www.talos.dev/][Talos]]? :: +#+begin_quote +Talos is a modern OS designed to be secure, immutable, and minimal. +#+end_quote + +- What is [[https://metal.equinix.com/][Equinix Metal]]? :: +#+begin_quote +A globally-available bare metal “as-a-service” that can be deployed and interconnected in minutes. +#+end_quote +The folks over at Equinix Metal have a wonderful heart for supporting Open Source communities. + +- Why is this important? :: In general: Orchestrating a container based OS such as Talos ([[http://flatcar-linux.org/][Flatcar]], [[https://getfedora.org/coreos/][Fedora CoreOS]], or [[https://rancher.com/products/rancher/][RancherOS]]) shifts focus from the Nodes to the workloads. 
In terms of Talos: Currently the documentation for running an OS such as Talos in Equinix Metal for Kubernetes with Cluster-API is not so well documented and therefore inaccessible. It's important to fill in the gaps of knowledge. + +* Dependencies + +What you'll need for this guide: + +- [[https://github.com/talos-systems/talos/releases/tag/v0.8.1][talosctl]] + +- [[https://kubernetes.io/docs/tasks/tools/install-kubectl/][kubectl]] + +- [[https://github.com/packethost/packet-cli][packet-cli]] + +- the ID and API token of existing Equinix Metal project + +- an existing Kubernetes cluster with a public IP (such as [[http://kind.sigs.k8s.io/][kind]], [[https://minikube.sigs.k8s.io/][minikube]], or a cluster already on Equinix Metal) + +* Preliminary steps + +In order to talk to Equinix Metal, we'll export environment variables to configure resources and talk via ~packet-cli~. + +Set the correct project to create and manage resources in: +#+begin_src tmate + read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID +#+end_src + +The API key for your account or project: +#+begin_src tmate + read -p 'PACKET_API_KEY: ' PACKET_API_KEY +#+end_src + +Export the variables to be accessible from ~packet-cli~ and ~clusterctl~ later on: +#+begin_src tmate + export PACKET_PROJECT_ID PACKET_API_KEY PACKET_TOKEN=$PACKET_API_KEY +#+end_src + +In the existing cluster, a public LoadBalancer IP will be needed. I have already installed nginx-ingress in this cluster, which has got a Service with the cluster's elastic IP. +We'll need this IP address later for use in booting the servers. +If you have set up your existing cluster differently, it'll just need to be an IP that we can use. 
+#+begin_src tmate + export LOAD_BALANCER_IP="$(kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')" +#+end_src + +* Setting up Cluster-API + +Install Talos providers for Cluster-API bootstrap and controlplane in your existing cluster: +#+begin_src tmate + clusterctl init -b talos -c talos -i packet +#+end_src + +This will install Talos's bootstrap and controlPlane controllers as well as the Packet / Equinix Metal infrastructure provider. + +**Important** note: +- the ~bootstrap-talos~ controller in the ~cabpt-system~ namespace must be running a version greater than ~v0.2.0-alpha.8~. The version can be displayed in with ~clusterctl upgrade plan~ when it's installed. + +* Setting up Matchbox + +Currently, since Equinix Metal have **not** yet added support for Talos, it is necessary to install [[https://matchbox.psdn.io/][Matchbox]] to boot the servers (There is an [[https://github.com/packethost/packet-images/issues/26][issue]] in progress and [[https://feedback.equinixmetal.com/operating-systems/p/talos-as-officially-supported-operating-system][feedback]] for adding support). + +- What is Matchbox? :: +#+begin_quote +Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. 
+#+end_quote + +Here is the manifest for a basic matchbox installation: +#+begin_src yaml :tangle ./matchbox.yaml :comments none + apiVersion: apps/v1 + kind: Deployment + metadata: + name: matchbox + spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + name: matchbox + template: + metadata: + labels: + name: matchbox + spec: + containers: + - name: matchbox + image: quay.io/poseidon/matchbox:v0.9.0 + env: + - name: MATCHBOX_ADDRESS + value: "0.0.0.0:8080" + - name: MATCHBOX_LOG_LEVEL + value: "debug" + ports: + - name: http + containerPort: 8080 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: 8080 + resources: + requests: + cpu: 30m + memory: 20Mi + limits: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: data + mountPath: /var/lib/matchbox + - name: assets + mountPath: /var/lib/matchbox/assets + volumes: + - name: data + hostPath: + path: /var/local/matchbox/data + - name: assets + hostPath: + path: /var/local/matchbox/assets + --- + apiVersion: v1 + kind: Service + metadata: + name: matchbox + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + spec: + type: LoadBalancer + selector: + name: matchbox + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 +#+end_src +Save it as ~matchbox.yaml~ + +The manifests above were inspired by the manifests in the [[https://github.com/poseidon/matchbox/tree/master/contrib/k8s][matchbox repo]]. 
+For production it might be wise to use: +- an Ingress with full TLS +- a ReadWriteMany storage provider instead of hostPath for scaling + +With the manifests ready to go, we'll install Matchbox into the ~matchbox~ namespace on the existing cluster with the following commands: +#+begin_src tmate + kubectl create ns matchbox + kubectl -n matchbox apply -f ./matchbox.yaml +#+end_src + +You may need to patch the ~Service.spec.externalIPs~ to have an IP to access it from if one is not populated: +#+begin_src tmate + kubectl -n matchbox patch \ + service matchbox \ + -p "{\"spec\":{\"externalIPs\":[\"$LOAD_BALANCER_IP\"]}}" +#+end_src + +Once the pod is live, we'll need to create a directory structure for storing Talos boot assets: +#+begin_src tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + mkdir -p /var/lib/matchbox/{profiles,groups} /var/lib/matchbox/assets/talos +#+end_src + +Inside the Matchbox container, we'll download the Talos boot assets for Talos version 0.8.1 into the assets folder: +#+begin_src tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + wget -P /var/lib/matchbox/assets/talos \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/initramfs-amd64.xz \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/vmlinuz-amd64 +#+end_src + +Now that the assets have been downloaded, run a checksum against them to verify: +#+begin_src tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c "cd /var/lib/matchbox/assets/talos && \ + wget -O- https://github.com/talos-systems/talos/releases/download/v0.8.1/sha512sum.txt 2> /dev/null \ + | sed 's,_out/,,g' \ + | grep 'initramfs-amd64.xz\|vmlinuz-amd64' \ + | sha512sum -c -" +#+end_src + +Since there's only one Pod in the Matchbox deployment, we'll export its name to copy files into it: +#+begin_src tmate + export MATCHBOX_POD_NAME=$(kubectl -n matchbox get pods -l name=matchbox -o=jsonpath='{.items[0].metadata.name}') +#+end_src + 
+[[https://matchbox.psdn.io/matchbox/#profiles][Profiles in Matchbox]] are JSON configurations for how the servers should boot, where from, and their kernel args. Save this file as ~profile-default-amd64.json~ +#+begin_src json :tangle ./profile-default-amd64.json :comments none + { + "id": "default-amd64", + "name": "default-amd64", + "boot": { + "kernel": "/assets/talos/vmlinuz-amd64", + "initrd": [ + "/assets/talos/initramfs-amd64.xz" + ], + "args": [ + "initrd=initramfs-amd64.xz", + "init_on_alloc=1", + "init_on_free=1", + "slub_debug=P", + "pti=on", + "random.trust_cpu=on", + "console=tty0", + "console=ttyS1,115200n8", + "slab_nomerge", + "printk.devkmsg=on", + "talos.platform=packet", + "talos.config=none" + ] + } + } +#+end_src + +[[https://matchbox.psdn.io/matchbox/#groups][Groups in Matchbox]] are a way of letting servers pick up profiles based on selectors. Save this file as ~group-default-amd64.json~ +#+begin_src json :tangle ./group-default-amd64.json :comments none + { + "id": "default-amd64", + "name": "default-amd64", + "profile": "default-amd64", + "selector": { + "arch": "amd64" + } + } +#+end_src + +We'll copy the profile and group into their respective folders: +#+begin_src tmate + kubectl -n matchbox \ + cp ./profile-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/profiles/default-amd64.json + kubectl -n matchbox \ + cp ./group-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/groups/default-amd64.json +#+end_src + +List the files to validate that they were written correctly: +#+begin_src tmate + kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c 'ls -alh /var/lib/matchbox/*/' +#+end_src + +** Testing Matchbox + +Using ~curl~, we can verify Matchbox's running state: +#+begin_src tmate + curl http://$LOAD_BALANCER_IP:8080 +#+end_src + +To test matchbox, we'll create an invalid userdata configuration for Talos, saving as ~userdata.txt~: +#+begin_src text :tangle ./userdata.txt :comments none +#!talos +#+end_src 
+Feel free to use a valid one. + +Now let's talk to Equinix Metal to create a server pointing to the Matchbox server: +#+begin_src tmate + packet-cli device create \ + --hostname talos-pxe-boot-test-1 \ + --plan c1.small.x86 \ + --facility sjc1 \ + --operating-system custom_ipxe \ + --project-id "$PACKET_PROJECT_ID" \ + --ipxe-script-url "http://$LOAD_BALANCER_IP:8080/ipxe?arch=amd64" \ + --userdata-file=./userdata.txt +#+end_src + +In the meanwhile, we can watch the logs to see how things are: +#+begin_src tmate + kubectl -n matchbox logs deployment/matchbox -f --tail=100 +#+end_src + +Looking at the logs, there should be some get requests of resources that will be used to boot the OS. + +Notes: +- fun fact: you can run Matchbox on Android using [[https://f-droid.org/en/packages/com.termux/][Termux]]. + +* The cluster + +** Preparing the cluster + +Here we will declare the template that we will shortly generate our usable cluster from: +#+begin_src yaml :tangle ./talos-packet-cluster-template.yaml :comments none + kind: TalosControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + metadata: + name: "${CLUSTER_NAME}-control-plane" + spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - 
https://github.com/packethost/packet-ccm/releases/download/v1.1.0/deployment.yaml + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + metadata: + name: "${CLUSTER_NAME}-control-plane" + spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] + --- + apiVersion: cluster.x-k8s.io/v1alpha3 + kind: Cluster + metadata: + name: "${CLUSTER_NAME}" + spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: "${CLUSTER_NAME}-control-plane" + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + metadata: + name: "${CLUSTER_NAME}" + spec: + projectID: "${PACKET_PROJECT_ID}" + facility: "${FACILITY}" + --- + apiVersion: cluster.x-k8s.io/v1alpha3 + kind: MachineDeployment + metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + replicas: ${WORKER_MACHINE_COUNT} + 
clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + metadata: + name: ${CLUSTER_NAME}-worker-a + spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] + --- + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + spec: + template: + spec: + generateType: init +#+end_src + +Inside of ~TalosControlPlane.spec.controlPlaneConfig.init~, I'm very much liking the use of ~generateType: init~ paired with ~configPatches~. 
This enables: +- configuration to be generated; +- management of certificates out of the cluster operator's hands; +- another level of standardisation; and +- overrides to be added where needed + +Notes: +- the ClusterAPI template above uses Packet-Cloud-Controller manager version 1.1.0 + +*** Cluster name :noexport: +#+name: cluster-config-env-name +#+begin_src bash + export CLUSTER_NAME="talos-metal" +#+end_src + +*** Templating your configuration + +Set environment variables for configuration: +#+name: cluster-config-env +#+begin_src bash :noweb yes + <> + export FACILITY=sjc1 + export KUBERNETES_VERSION=v1.20.2 + export POD_CIDR=10.244.0.0/16 + export SERVICE_CIDR=10.96.0.0/12 + export CONTROLPLANE_NODE_TYPE=c1.small.x86 + export CONTROL_PLANE_MACHINE_COUNT=3 + export WORKER_NODE_TYPE=c1.small.x86 + export WORKER_MACHINE_COUNT=0 + export SSH_KEY="" + export IPXE_URL=$LOAD_BALANCER_IP +#+end_src + +In the variables above, we will create a cluster which has three small controlPlane nodes to run workloads. + +**** Apply the variables :noexport: + +Set the env in the tmate session: +#+begin_src tmate :noweb yes + <> +#+end_src + +*** Render the manifests +Render your cluster configuration from the template: +#+begin_src tmate :noweb yes + clusterctl config cluster "$CLUSTER_NAME" \ + --from ./talos-packet-cluster-template.yaml \ + -n "$CLUSTER_NAME" > "$CLUSTER_NAME"-cluster-capi.yaml +#+end_src + +** Creating the cluster + +With the template for the cluster rendered to how we wish to deploy it, it's now time to apply it: +#+begin_src tmate + kubectl create ns "$CLUSTER_NAME" + kubectl -n "$CLUSTER_NAME" apply -f ./"$CLUSTER_NAME"-cluster-capi.yaml +#+end_src + +The cluster will now be brought up; we can see the progress by taking a look at the resources: +#+begin_src tmate + kubectl -n "$CLUSTER_NAME" get machines,clusters,packetmachines,packetclusters +#+end_src + +Note: As expected, the cluster may take some time to appear and be accessible. 
+ +Not long after applying, a KubeConfig is available. Fetch the KubeConfig from the existing cluster with: +#+begin_src tmate + kubectl -n "$CLUSTER_NAME" get secrets \ + "$CLUSTER_NAME"-kubeconfig -o=jsonpath='{.data.value}' \ + | base64 -d > $HOME/.kube/"$CLUSTER_NAME" +#+end_src + +Using the KubeConfig from the new cluster, check out the status of it: +#+begin_src tmate + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" cluster-info +#+end_src + +Once the APIServer is reachable, create configuration for how the Packet-Cloud-Controller-Manager should talk to Equinix-Metal: +#+begin_src tmate + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" -n kube-system \ + create secret generic packet-cloud-config \ + --from-literal=cloud-sa.json="{\"apiKey\": \"${PACKET_API_KEY}\",\"projectID\": \"${PACKET_PROJECT_ID}\"}" +#+end_src + +Since we're able to talk to the APIServer, we can check how all Pods are doing: +#+name: list all Pods +#+begin_src bash :noweb yes + <> + kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME"\ + -n kube-system get pods +#+end_src + +Listing Pods shows that everything is live and in a good state: +#+RESULTS: list all Pods +#+begin_src bash +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-5b55f9f688-fb2cb 1/1 Running 0 25m +kube-system coredns-5b55f9f688-qsvg5 1/1 Running 0 25m +kube-system kube-apiserver-665px 1/1 Running 0 19m +kube-system kube-apiserver-mz68q 1/1 Running 0 19m +kube-system kube-apiserver-qfklt 1/1 Running 2 19m +kube-system kube-controller-manager-6grxd 1/1 Running 0 19m +kube-system kube-controller-manager-cf76h 1/1 Running 0 19m +kube-system kube-controller-manager-dsmgf 1/1 Running 0 19m +kube-system kube-flannel-brdxw 1/1 Running 0 24m +kube-system kube-flannel-dm85d 1/1 Running 0 24m +kube-system kube-flannel-sg6k9 1/1 Running 0 24m +kube-system kube-proxy-flx59 1/1 Running 0 24m +kube-system kube-proxy-gbn4l 1/1 Running 0 24m +kube-system kube-proxy-ns84v 1/1 Running 0 24m +kube-system kube-scheduler-4qhjw 
1/1 Running 0 19m +kube-system kube-scheduler-kbm5z 1/1 Running 0 19m +kube-system kube-scheduler-klsmp 1/1 Running 0 19m +kube-system packet-cloud-controller-manager-77cd8c9c7c-cdzfv 1/1 Running 0 20m +kube-system pod-checkpointer-4szh6 1/1 Running 0 19m +kube-system pod-checkpointer-4szh6-talos-metal-control-plane-j29lb 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h-talos-metal-control-plane-lk9f2 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh-talos-metal-control-plane-h9v4j 1/1 Running 0 19m +#+end_src + +With the cluster live, it's now ready for workloads to be deployed! + +* Talos Configuration + +In order to manage Talos Nodes outside of Kubernetes, we need to create and set up configuration to use. + +Create the directory for the config: +#+begin_src tmate + mkdir -p $HOME/.talos +#+end_src + +Discover the IP for the first controlPlane: +#+begin_src tmate + export TALOS_ENDPOINT=$(kubectl -n "$CLUSTER_NAME" \ + get machines \ + $(kubectl -n "$CLUSTER_NAME" \ + get machines -l cluster.x-k8s.io/control-plane='' \ + --no-headers --output=jsonpath='{.items[0].metadata.name}') \ + -o=jsonpath="{.status.addresses[?(@.type=='ExternalIP')].address}" | awk '{print $2}') +#+end_src + +Fetch the ~talosconfig~ from the existing cluster: +#+begin_src tmate + kubectl get talosconfig \ + -n $CLUSTER_NAME \ + -l cluster.x-k8s.io/cluster-name=$CLUSTER_NAME \ + -o yaml -o jsonpath='{.items[0].status.talosConfig}' > $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml +#+end_src + +Write in the configuration the endpoint IP and node IP: +#+begin_src tmate + talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config endpoint $TALOS_ENDPOINT + talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config node $TALOS_ENDPOINT +#+end_src + +Now that the ~talosconfig~ has 
been written, try listing all containers: +#+name: list-containers-on-containerd +#+begin_src bash :noweb yes + <> + # removing ip; omit ` | sed ...` for regular use + talosctl --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml containers | sed -r 's/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b'/"x.x.x.x "/ +#+end_src + +Here's the containers running on this particular node, in containerd (not k8s related): +#+RESULTS: list-containers-on-containerd +#+begin_src bash +NODE NAMESPACE ID IMAGE PID STATUS +x.x.x.x system apid talos/apid 3046 RUNNING +x.x.x.x system etcd gcr.io/etcd-development/etcd:v3.4.14 3130 RUNNING +x.x.x.x system networkd talos/networkd 2879 RUNNING +x.x.x.x system routerd talos/routerd 2888 RUNNING +x.x.x.x system timed talos/timed 2976 RUNNING +x.x.x.x system trustd talos/trustd 3047 RUNNING +#+end_src + +* Clean up + +Tearing down the entire cluster and resources associated with it, can be achieved by + +i. Deleting the cluster: +#+begin_src tmate + kubectl -n "$CLUSTER_NAME" delete cluster "$CLUSTER_NAME" +#+end_src + +ii. Deleting the namespace: +#+begin_src tmate + kubectl delete ns "$CLUSTER_NAME" +#+end_src + +iii. Removing local configurations: +#+begin_src tmate + rm \ + $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + $HOME/.kube/"$CLUSTER_NAME" +#+end_src + +* What have I learned from this? +- (always learning) how wonderful the Kubernetes community is :: there are so many knowledgable individuals who are so ready for collaboration and adoption - it doesn't matter the SIG or group. +- how modular Cluster-API is :: Cluster-API components (bootstrap, controlPlane, core, infrastructure) can be swapped out and meshed together in very cool ways. + +* Credits +Integrating Talos into this project would not be possible without help from [[https://github.com/andrewrynhard][Andrew Rynhard (Talos Systems)]], huge thanks to him for reaching out for pairing and co-authoring. 
+ +* Notes and references +- with the new cluster's controlPlane live and available for deployment, the iPXE server could be moved into that cluster - meaning that new servers boot from the cluster that they'll join, making it almost self-contained +- cluster configuration as based off of [[https://github.com/kubernetes-sigs/cluster-api-provider-packet/blob/479faf06e1337b1e979cb624ca8be015b2a89cde/templates/cluster-template.yaml][cluster-template.yaml from the cluster-api-provider-packet repo]] +- this post has been made to [[https://blog.calebwoodbine.com/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal][blog.calebwoodine.com]], and [[https://ii.coop/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal/][talos-system.com/blog]], but is also available as an [[https://github.com/ii/org/blob/master/ii/equinix-metal-capi-talos-kubernetes/README.org][Org file]] + +----- + +Hope you've enjoyed the output of this project! +Thank you! + +* Footnotes + +#+REVEAL_ROOT: https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.9.2 +#+NOREVEAL_ROOT: https://raw.githubusercontent.com/hakimel/reveal.js/3.9.2/ +# #+REVEAL_TITLE_SLIDE: +#+NOREVEAL_DEFAULT_FRAG_STYLE: YY +#+NOREVEAL_EXTRA_CSS: YY +#+NOREVEAL_EXTRA_JS: YY +#+REVEAL_HLEVEL: 2 +#+REVEAL_MARGIN: 0.1 +#+REVEAL_WIDTH: 1000 +#+REVEAL_HEIGHT: 600 +#+REVEAL_MAX_SCALE: 3.5 +#+REVEAL_MIN_SCALE: 0.2 +#+REVEAL_PLUGINS: (markdown notes highlight multiplex) +#+REVEAL_SLIDE_NUMBER: "" +#+REVEAL_SPEED: 1 +#+REVEAL_THEME: moon +#+REVEAL_THEME_OPTIONS: beige|black|blood|league|moon|night|serif|simple|sky|solarized|white +#+REVEAL_TRANS: cube +#+REVEAL_TRANS_OPTIONS: none|cube|fade|concave|convex|page|slide|zoom + +#+OPTIONS: num:nil +#+OPTIONS: toc:nil +#+OPTIONS: mathjax:Y +#+OPTIONS: reveal_single_file:nil +#+OPTIONS: reveal_control:t +#+OPTIONS: reveal-progress:t +#+OPTIONS: reveal_history:nil +#+OPTIONS: reveal_center:t +#+OPTIONS: reveal_rolling_links:nil +#+OPTIONS: reveal_keyboard:t +#+OPTIONS: 
reveal_overview:t diff --git a/blog/org/rerouting-container-registries-with-envoy.org b/blog/org/rerouting-container-registries-with-envoy.org new file mode 100644 index 0000000..37fc33b --- /dev/null +++ b/blog/org/rerouting-container-registries-with-envoy.org @@ -0,0 +1,166 @@ +* Introduction +In this post, I will detail the discovery of Envoy's dynamic rewriting location capabilities and the relationship to OCI registries. + +**What is [[https://www.envoyproxy.io/][Envoy]]?** + #+BEGIN_QUOTE + open source edge and service proxy, designed for cloud-native applications + #+END_QUOTE + +**What is an [[https://opencontainers.org/][OCI container registry]]?** + #+BEGIN_QUOTE + a standard, and specification, for the implementation of container registries + #+END_QUOTE + +I've been playing around with and learning Envoy for a number of months now. One of the concepts I'm investigating is rewriting the request's host. +Envoy is a super powerful piece of software. It is flexible and highly dynamic. + +* Journey +** My expectations +The goal is to set up Envoy on a host to rewrite all requests dynamically back to a container registry hosted by a cloud-provider, such as GCP. + +** Initial discoveries +One of the first things I investigated was the ability to get traffic from one site and serve it on another (proxying). +I searched in the docs and in their [[https://www.envoyproxy.io/docs/envoy/v1.17.1/start/quick-start/configuration-static][most basic example]] could see that, by using envoy's http filter in the filter_chains, a static host could be rewritten. + +Example: +#+BEGIN_SRC yaml +... 
+static_resources: + listeners: + - name: listener_0 + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + access_log: + - name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: /dev/stdout + http_filters: + - name: envoy.filters.http.router + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + route: + host_rewrite_literal: www.envoyproxy.io + cluster: service_envoyproxy_io +... +#+END_SRC + +This is a great start! This serves the site and its content under the host where Envoy is served. +However, the host in the rewrite is static and not dynamic. It seems at this point like doing the implementation this way is not viable. + +** Learning about filter-chains +Envoy has the lovely feature to set many kinds of middleware in the middle of a request. +This middleware can be used to add/change/remove things from the request. +Envoy is particularly good at HTTP related filtering. It also supports such features as dynamic forward proxy, JWT auth, health checks, and rate limiting. + +The functionality is infinitely useful as filters can be such things as gRPC, PostgreSQL, Wasm, and even Lua. + +** The implementation +Once I found the ability to write Lua as a filter, I found that it provided enough capability to perform the dynamic host rewrite. 
+ +#+BEGIN_SRC yaml +static_resources: + listeners: + - name: main + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + local reg1 = "k8s.gcr.io" + local reg2 = "registry-1.docker.io" + local reg2WithIP = "192.168.0.1" + function envoy_on_request(request_handle) + local reg = reg1 + remoteAddr = request_handle:headers():get("x-real-ip") + if remoteAddr == reg2WithIP then + request_handle:logInfo("remoteAddr: "..reg2WithIP) + reg = reg2 + end + if request_handle:headers():get(":method") == "GET" then + request_handle:respond( + { + [":status"] = "302", + ["location"] = "https://"..reg..request_handle:headers():get(":path"), + ["Content-Type"] = "text/html; charset=utf-8", + [":authority"] = "web_service" + }, + ''.."302"..".\n") + end + end + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ii.coop + port_value: 443 +#+END_SRC + +With envoy running this config, the behaviour of the requests is +- rewrite all traffic hitting the web service to /k8s.gcr.io/ +- except if the IP is /192.168.0.1/ then set the location to /registry-1.docker.io/ + +Since I'm using a [[https://github.com/sharingio/pair][Pair]] instance, it sets the local subnet to 
/192.168.0.0/24/ so when I try to =docker pull humacs-envoy-10000.$SHARINGIO_PAIR_BASE_DNS_NAME/library/postgres:12-alpine= it will go to /docker.io/. + +On my local machine, pulling container images using =docker pull humacs-envoy-10000.$SHARINGIO_PAIR_BASE_DNS_NAME/e2e-test-images/agnhost:2.26= will instead use /k8s.gcr.io/. + +To achieve this, I researched how other http libraries handle redirects - namely [[https://golang.org/src/net/http/server.go?s=66471:66536#L2179][Golang's net/http.Redirect]]. +The main things that Golang's /http.Redirect/ does are: +- set the /content-type/ header to /text/html/ +- set the location to the destination +- set the status code to 302 +- set the body to the same data in earlier steps, but in an /a/ tag. + +* Final thoughts +I'm learning that Envoy is highly flexible and seemingly limitless in its capabilities. + +It's exciting to see Envoy being adopted in so many places - moreover to see the diverse use cases and implementations. + +Big shout out to [[https://ii.coop/author/zach-mandeville][Zach]] for pairing on this with a few different aspects and attempts! (Zach is cool:tm:) diff --git a/blog/org/test.org b/blog/org/test.org new file mode 100644 index 0000000..4bbdd7f --- /dev/null +++ b/blog/org/test.org @@ -0,0 +1,29 @@ +#+TITLE: Test.org + +* something cool +this is something cool and my thoughts /on it/. +I can do small code blocks +: print("like so") + + +* something fun +I can also do bigger code blocks +#+begin_src js +console.log("like so") +#+end_src +* Inline code +there are a couple ways to do inline code +** code block +you use = for this to make =inline code= +** preformatted +you use ~ for this to make ~preformatted text~ +* something sassy +I can make a nice list of things: +- oranges +- bananas +- apples + +can I also do definitions? 
+- something :: is cool + +- veranda :: a covered patio diff --git a/blog/static/favicon.ico b/blog/static/favicon.ico new file mode 100644 index 0000000..7ce8680 Binary files /dev/null and b/blog/static/favicon.ico differ diff --git a/blog/static/ii-style.css b/blog/static/ii-style.css new file mode 100644 index 0000000..6153268 --- /dev/null +++ b/blog/static/ii-style.css @@ -0,0 +1,89 @@ +div.summary { + font-size: 1rem; + color: gray; +} +div.summary p { + padding: 0; + margin-top: 0.5rem; +} + +.author { + padding: 3px; +} + +div.authors { + margin-top: 1rem; +} + +a.author { + margin-right: 0.25rem; +} +p.purpose { + max-width: 30rem; +} + +div#about section#humans { + display: flex; + flex-flow: column; +} + +div#about h2 { + font-size: 1.5rem; + border-top: 1px solid black; +} + +section#humans article { + margin: 1rem 0 1rem 0; +} + +section#humans article h3 { + font-size: 1rem; + font-weight: 600; +} + +section#humans div.profile-photo, div.profile-info > div.profile-photo { + margin: 1rem auto 1rem auto; + position: relative; + border-radius: 50%; + height: 200px; + width: 200px; + overflow: hidden; + background-position: center; + background-size: cover; + background-repeat: no-repeat; +} + +section#humans article div.content.long { + height: 200px; + overflow: hidden; +} + +button.read-more { + font-size: 1rem; + outline: 0; + border: 0; + background: unset; + cursor: pointer; +} + +button.read-more:hover { + color: dodgerblue; +} + +div.profile-info { + max-width: 34rem; + width: 100%; + margin: 0 auto; +} + +#humans a { + color: #000; +} + +@media(min-width: 800px) { + div#about section#humans { + display: grid; + grid-template-columns: 1fr 1fr; + grid-column-gap: 7rem; + } +} diff --git a/blog/static/images/2015/10/nc_map.jpg b/blog/static/images/2015/10/nc_map.jpg new file mode 100644 index 0000000..277f171 Binary files /dev/null and b/blog/static/images/2015/10/nc_map.jpg differ diff --git a/blog/static/images/2015/10/nc_map.png 
b/blog/static/images/2015/10/nc_map.png new file mode 100644 index 0000000..e420eb7 Binary files /dev/null and b/blog/static/images/2015/10/nc_map.png differ diff --git a/blog/static/images/2015/10/w541_chef.png b/blog/static/images/2015/10/w541_chef.png new file mode 100644 index 0000000..428b4a4 Binary files /dev/null and b/blog/static/images/2015/10/w541_chef.png differ diff --git a/blog/static/images/2016/09/2016-09-07-085051_854x469_escrotum.png b/blog/static/images/2016/09/2016-09-07-085051_854x469_escrotum.png new file mode 100644 index 0000000..4e649ca Binary files /dev/null and b/blog/static/images/2016/09/2016-09-07-085051_854x469_escrotum.png differ diff --git a/blog/static/images/2016/09/2016-09-07-090344_837x471_escrotum.png b/blog/static/images/2016/09/2016-09-07-090344_837x471_escrotum.png new file mode 100644 index 0000000..9c11d71 Binary files /dev/null and b/blog/static/images/2016/09/2016-09-07-090344_837x471_escrotum.png differ diff --git a/blog/static/images/2016/09/2016-09-12-123313_914x504_escrotum.png b/blog/static/images/2016/09/2016-09-12-123313_914x504_escrotum.png new file mode 100644 index 0000000..02b36f9 Binary files /dev/null and b/blog/static/images/2016/09/2016-09-12-123313_914x504_escrotum.png differ diff --git a/blog/static/images/2016/09/2016-09-12-123702_1679x532_escrotum.png b/blog/static/images/2016/09/2016-09-12-123702_1679x532_escrotum.png new file mode 100644 index 0000000..8799f43 Binary files /dev/null and b/blog/static/images/2016/09/2016-09-12-123702_1679x532_escrotum.png differ diff --git a/blog/static/images/2016/09/2016-09-13-104020_1330x424_escrotum-1.png b/blog/static/images/2016/09/2016-09-13-104020_1330x424_escrotum-1.png new file mode 100644 index 0000000..43f0e7f Binary files /dev/null and b/blog/static/images/2016/09/2016-09-13-104020_1330x424_escrotum-1.png differ diff --git a/blog/static/images/2016/09/2016-09-13-104020_1330x424_escrotum.png 
b/blog/static/images/2016/09/2016-09-13-104020_1330x424_escrotum.png new file mode 100644 index 0000000..43f0e7f Binary files /dev/null and b/blog/static/images/2016/09/2016-09-13-104020_1330x424_escrotum.png differ diff --git a/blog/static/images/2016/09/2016-09-13-115634_646x278_escrotum.png b/blog/static/images/2016/09/2016-09-13-115634_646x278_escrotum.png new file mode 100644 index 0000000..933c690 Binary files /dev/null and b/blog/static/images/2016/09/2016-09-13-115634_646x278_escrotum.png differ diff --git a/blog/static/images/2016/09/2016-09-13-120248_415x462_escrotum.png b/blog/static/images/2016/09/2016-09-13-120248_415x462_escrotum.png new file mode 100644 index 0000000..f888572 Binary files /dev/null and b/blog/static/images/2016/09/2016-09-13-120248_415x462_escrotum.png differ diff --git a/blog/static/images/2016/09/2016-09-13-120912_650x537_escrotum.png b/blog/static/images/2016/09/2016-09-13-120912_650x537_escrotum.png new file mode 100644 index 0000000..3b5bff9 Binary files /dev/null and b/blog/static/images/2016/09/2016-09-13-120912_650x537_escrotum.png differ diff --git a/blog/static/images/2016/09/2016-09-13-121209_824x565_escrotum.png b/blog/static/images/2016/09/2016-09-13-121209_824x565_escrotum.png new file mode 100644 index 0000000..33920be Binary files /dev/null and b/blog/static/images/2016/09/2016-09-13-121209_824x565_escrotum.png differ diff --git a/blog/static/images/2016/09/TactileAstronomy_640x290.jpg b/blog/static/images/2016/09/TactileAstronomy_640x290.jpg new file mode 100644 index 0000000..11d257e Binary files /dev/null and b/blog/static/images/2016/09/TactileAstronomy_640x290.jpg differ diff --git a/blog/static/images/2016/09/bucket-was-funded.png b/blog/static/images/2016/09/bucket-was-funded.png new file mode 100644 index 0000000..9eb22e5 Binary files /dev/null and b/blog/static/images/2016/09/bucket-was-funded.png differ diff --git a/blog/static/images/2016/09/chilli4change-food.jpg 
b/blog/static/images/2016/09/chilli4change-food.jpg new file mode 100644 index 0000000..ab609cb Binary files /dev/null and b/blog/static/images/2016/09/chilli4change-food.jpg differ diff --git a/blog/static/images/2016/09/chilli4change-food.png b/blog/static/images/2016/09/chilli4change-food.png new file mode 100644 index 0000000..8e57408 Binary files /dev/null and b/blog/static/images/2016/09/chilli4change-food.png differ diff --git a/blog/static/images/2016/09/mindblown.jpg b/blog/static/images/2016/09/mindblown.jpg new file mode 100644 index 0000000..80770fe Binary files /dev/null and b/blog/static/images/2016/09/mindblown.jpg differ diff --git a/blog/static/images/2017/01/2017-01-03-082158_224x193_escrotum.png b/blog/static/images/2017/01/2017-01-03-082158_224x193_escrotum.png new file mode 100644 index 0000000..ff63f96 Binary files /dev/null and b/blog/static/images/2017/01/2017-01-03-082158_224x193_escrotum.png differ diff --git a/blog/static/images/2017/01/CHIP-Flasher-Plugin-window.png b/blog/static/images/2017/01/CHIP-Flasher-Plugin-window.png new file mode 100644 index 0000000..15f3598 Binary files /dev/null and b/blog/static/images/2017/01/CHIP-Flasher-Plugin-window.png differ diff --git a/blog/static/images/2017/01/ChefK8s.png b/blog/static/images/2017/01/ChefK8s.png new file mode 100644 index 0000000..f6ed0c9 Binary files /dev/null and b/blog/static/images/2017/01/ChefK8s.png differ diff --git a/blog/static/images/2017/01/Flasher-CHIP-Detected.png b/blog/static/images/2017/01/Flasher-CHIP-Detected.png new file mode 100644 index 0000000..9355078 Binary files /dev/null and b/blog/static/images/2017/01/Flasher-CHIP-Detected.png differ diff --git a/blog/static/images/2017/01/Flasher-CHIP-details.png b/blog/static/images/2017/01/Flasher-CHIP-details.png new file mode 100644 index 0000000..c29f589 Binary files /dev/null and b/blog/static/images/2017/01/Flasher-CHIP-details.png differ diff --git a/blog/static/images/2017/01/Flasher-CHIP-image-selection-1.png 
b/blog/static/images/2017/01/Flasher-CHIP-image-selection-1.png new file mode 100644 index 0000000..5341f7e Binary files /dev/null and b/blog/static/images/2017/01/Flasher-CHIP-image-selection-1.png differ diff --git a/blog/static/images/2017/01/Flasher-CHIP-image-selection.png b/blog/static/images/2017/01/Flasher-CHIP-image-selection.png new file mode 100644 index 0000000..1260a62 Binary files /dev/null and b/blog/static/images/2017/01/Flasher-CHIP-image-selection.png differ diff --git a/blog/static/images/2017/01/ii-community-cloud-1.png b/blog/static/images/2017/01/ii-community-cloud-1.png new file mode 100644 index 0000000..18ed027 Binary files /dev/null and b/blog/static/images/2017/01/ii-community-cloud-1.png differ diff --git a/blog/static/images/2017/01/ii-community-cloud-2.png b/blog/static/images/2017/01/ii-community-cloud-2.png new file mode 100644 index 0000000..8c1f536 Binary files /dev/null and b/blog/static/images/2017/01/ii-community-cloud-2.png differ diff --git a/blog/static/images/2017/01/ii-community-cloud-3.png b/blog/static/images/2017/01/ii-community-cloud-3.png new file mode 100644 index 0000000..11c4360 Binary files /dev/null and b/blog/static/images/2017/01/ii-community-cloud-3.png differ diff --git a/blog/static/images/2017/01/ii-community-cloud.png b/blog/static/images/2017/01/ii-community-cloud.png new file mode 100644 index 0000000..d07b800 Binary files /dev/null and b/blog/static/images/2017/01/ii-community-cloud.png differ diff --git a/blog/static/images/2017/01/ii-logo.png.300x300_q85_crop.jpg b/blog/static/images/2017/01/ii-logo.png.300x300_q85_crop.jpg new file mode 100644 index 0000000..95d61a1 Binary files /dev/null and b/blog/static/images/2017/01/ii-logo.png.300x300_q85_crop.jpg differ diff --git a/blog/static/images/2017/01/ii_3nodes.png b/blog/static/images/2017/01/ii_3nodes.png new file mode 100644 index 0000000..fb4df48 Binary files /dev/null and b/blog/static/images/2017/01/ii_3nodes.png differ diff --git 
a/blog/static/images/2017/01/ii_certs.png b/blog/static/images/2017/01/ii_certs.png new file mode 100644 index 0000000..6f87b8f Binary files /dev/null and b/blog/static/images/2017/01/ii_certs.png differ diff --git a/blog/static/images/2017/01/ii_gitlab_running.png b/blog/static/images/2017/01/ii_gitlab_running.png new file mode 100644 index 0000000..0e0534d Binary files /dev/null and b/blog/static/images/2017/01/ii_gitlab_running.png differ diff --git a/blog/static/images/2017/01/ii_k8s_configure_master.png b/blog/static/images/2017/01/ii_k8s_configure_master.png new file mode 100644 index 0000000..05cb896 Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_configure_master.png differ diff --git a/blog/static/images/2017/01/ii_k8s_gitlab.png b/blog/static/images/2017/01/ii_k8s_gitlab.png new file mode 100644 index 0000000..e275547 Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_gitlab.png differ diff --git a/blog/static/images/2017/01/ii_k8s_ingress.png b/blog/static/images/2017/01/ii_k8s_ingress.png new file mode 100644 index 0000000..46bd09a Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_ingress.png differ diff --git a/blog/static/images/2017/01/ii_k8s_init.png b/blog/static/images/2017/01/ii_k8s_init.png new file mode 100644 index 0000000..b505c4f Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_init.png differ diff --git a/blog/static/images/2017/01/ii_k8s_kubectl.png b/blog/static/images/2017/01/ii_k8s_kubectl.png new file mode 100644 index 0000000..31b923c Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_kubectl.png differ diff --git a/blog/static/images/2017/01/ii_k8s_minions.png b/blog/static/images/2017/01/ii_k8s_minions.png new file mode 100644 index 0000000..a8a0e2a Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_minions.png differ diff --git a/blog/static/images/2017/01/ii_k8s_pods.png b/blog/static/images/2017/01/ii_k8s_pods.png new file mode 100644 index 
0000000..a0e5268 Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_pods.png differ diff --git a/blog/static/images/2017/01/ii_k8s_ready-1.png b/blog/static/images/2017/01/ii_k8s_ready-1.png new file mode 100644 index 0000000..4dbddc8 Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_ready-1.png differ diff --git a/blog/static/images/2017/01/ii_k8s_ready.png b/blog/static/images/2017/01/ii_k8s_ready.png new file mode 100644 index 0000000..4dbddc8 Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_ready.png differ diff --git a/blog/static/images/2017/01/ii_k8s_resin.png b/blog/static/images/2017/01/ii_k8s_resin.png new file mode 100644 index 0000000..c84ce0b Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_resin.png differ diff --git a/blog/static/images/2017/01/ii_k8s_waiting.png b/blog/static/images/2017/01/ii_k8s_waiting.png new file mode 100644 index 0000000..f99d70b Binary files /dev/null and b/blog/static/images/2017/01/ii_k8s_waiting.png differ diff --git a/blog/static/images/2017/01/ii_os_complete.png b/blog/static/images/2017/01/ii_os_complete.png new file mode 100644 index 0000000..8ee11b9 Binary files /dev/null and b/blog/static/images/2017/01/ii_os_complete.png differ diff --git a/blog/static/images/2017/01/ii_poweron.png b/blog/static/images/2017/01/ii_poweron.png new file mode 100644 index 0000000..18a6f58 Binary files /dev/null and b/blog/static/images/2017/01/ii_poweron.png differ diff --git a/blog/static/images/2017/01/ii_provision_hanlon.png b/blog/static/images/2017/01/ii_provision_hanlon.png new file mode 100644 index 0000000..d3c5a3a Binary files /dev/null and b/blog/static/images/2017/01/ii_provision_hanlon.png differ diff --git a/blog/static/images/2017/01/ii_register.png b/blog/static/images/2017/01/ii_register.png new file mode 100644 index 0000000..e719fe3 Binary files /dev/null and b/blog/static/images/2017/01/ii_register.png differ diff --git 
a/blog/static/images/2017/01/resin-chip-build-1.png b/blog/static/images/2017/01/resin-chip-build-1.png new file mode 100644 index 0000000..9503076 Binary files /dev/null and b/blog/static/images/2017/01/resin-chip-build-1.png differ diff --git a/blog/static/images/2017/01/resin-chip-build.png b/blog/static/images/2017/01/resin-chip-build.png new file mode 100644 index 0000000..fdceafd Binary files /dev/null and b/blog/static/images/2017/01/resin-chip-build.png differ diff --git a/blog/static/images/2017/01/resinos-chip-download.png b/blog/static/images/2017/01/resinos-chip-download.png new file mode 100644 index 0000000..0392297 Binary files /dev/null and b/blog/static/images/2017/01/resinos-chip-download.png differ diff --git a/blog/static/images/2017/01/uboot_fel_jumper.jpg b/blog/static/images/2017/01/uboot_fel_jumper.jpg new file mode 100644 index 0000000..f086ed2 Binary files /dev/null and b/blog/static/images/2017/01/uboot_fel_jumper.jpg differ diff --git a/blog/static/images/2021/apisnoop-progress.png b/blog/static/images/2021/apisnoop-progress.png new file mode 100644 index 0000000..462353c Binary files /dev/null and b/blog/static/images/2021/apisnoop-progress.png differ diff --git a/blog/static/images/README.md b/blog/static/images/README.md new file mode 100644 index 0000000..a6f11db --- /dev/null +++ b/blog/static/images/README.md @@ -0,0 +1,3 @@ +# Content / Images + +If using the standard file storage, Ghost will upload images to this directory. 
\ No newline at end of file diff --git a/blog/static/images/blog_image/1_15Cover.png b/blog/static/images/blog_image/1_15Cover.png new file mode 100644 index 0000000..3f9396c Binary files /dev/null and b/blog/static/images/blog_image/1_15Cover.png differ diff --git a/blog/static/images/blog_image/1_21cover.png b/blog/static/images/blog_image/1_21cover.png new file mode 100644 index 0000000..14106a0 Binary files /dev/null and b/blog/static/images/blog_image/1_21cover.png differ diff --git a/blog/static/images/blog_image/Conformance-progress.png b/blog/static/images/blog_image/Conformance-progress.png new file mode 100644 index 0000000..fc1e597 Binary files /dev/null and b/blog/static/images/blog_image/Conformance-progress.png differ diff --git a/blog/static/images/blog_image/dan_kohn.jpg b/blog/static/images/blog_image/dan_kohn.jpg new file mode 100644 index 0000000..143d81d Binary files /dev/null and b/blog/static/images/blog_image/dan_kohn.jpg differ diff --git a/blog/static/images/blog_image/disk_graph.png b/blog/static/images/blog_image/disk_graph.png new file mode 100644 index 0000000..2d97426 Binary files /dev/null and b/blog/static/images/blog_image/disk_graph.png differ diff --git a/blog/static/images/logo.png b/blog/static/images/logo.png new file mode 100644 index 0000000..959d53a Binary files /dev/null and b/blog/static/images/logo.png differ diff --git a/blog/static/images/profiles/berno-kleinhans.jpg b/blog/static/images/profiles/berno-kleinhans.jpg new file mode 100644 index 0000000..d4bfae1 Binary files /dev/null and b/blog/static/images/profiles/berno-kleinhans.jpg differ diff --git a/blog/static/images/profiles/brenda-peel.jpg b/blog/static/images/profiles/brenda-peel.jpg new file mode 100644 index 0000000..56efd27 Binary files /dev/null and b/blog/static/images/profiles/brenda-peel.jpg differ diff --git a/blog/static/images/profiles/caleb-woodbine.jpg b/blog/static/images/profiles/caleb-woodbine.jpg new file mode 100644 index 0000000..4f8017a 
Binary files /dev/null and b/blog/static/images/profiles/caleb-woodbine.jpg differ diff --git a/blog/static/images/profiles/hippie-hacker.jpg b/blog/static/images/profiles/hippie-hacker.jpg new file mode 100644 index 0000000..e12351b Binary files /dev/null and b/blog/static/images/profiles/hippie-hacker.jpg differ diff --git a/blog/static/images/profiles/riaan-kleinhans.png b/blog/static/images/profiles/riaan-kleinhans.png new file mode 100644 index 0000000..0577e44 Binary files /dev/null and b/blog/static/images/profiles/riaan-kleinhans.png differ diff --git a/blog/static/images/profiles/stephen-heywood.jpg b/blog/static/images/profiles/stephen-heywood.jpg new file mode 100644 index 0000000..3500950 Binary files /dev/null and b/blog/static/images/profiles/stephen-heywood.jpg differ diff --git a/blog/static/images/profiles/zach-mandeville.png b/blog/static/images/profiles/zach-mandeville.png new file mode 100644 index 0000000..e5a8710 Binary files /dev/null and b/blog/static/images/profiles/zach-mandeville.png differ diff --git a/blog/static/images/unknown-profile.svg b/blog/static/images/unknown-profile.svg new file mode 100644 index 0000000..a134249 --- /dev/null +++ b/blog/static/images/unknown-profile.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/harbor/.eslintrc.js b/blog/themes/harbor/.eslintrc.js new file mode 100644 index 0000000..ce94cfb --- /dev/null +++ b/blog/themes/harbor/.eslintrc.js @@ -0,0 +1,18 @@ +module.exports = { + env: { + browser: true, + es2021: true + }, + extends: [ + 'eslint:recommended', + 'plugin:@typescript-eslint/recommended' + ], + parser: '@typescript-eslint/parser', + parserOptions: { + ecmaVersion: 12, + sourceType: 'module' + }, + plugins: ['@typescript-eslint'], + rules: { + }, +}; diff --git a/blog/themes/harbor/.github/workflows/ci.yml b/blog/themes/harbor/.github/workflows/ci.yml new file mode 100644 index 0000000..a046cf2 --- /dev/null +++ b/blog/themes/harbor/.github/workflows/ci.yml @@ -0,0 +1,13 
@@ +name: Main Workflow +on: [push] +jobs: + sonarCloudTrigger: + name: SonarCloud Trigger + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: SonarCloudScan + uses: sonarsource/sonarcloud-github-action@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} \ No newline at end of file diff --git a/blog/themes/harbor/.gitignore b/blog/themes/harbor/.gitignore new file mode 100644 index 0000000..58b805f --- /dev/null +++ b/blog/themes/harbor/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +node_modules/ \ No newline at end of file diff --git a/blog/themes/harbor/.prettierrc.js b/blog/themes/harbor/.prettierrc.js new file mode 100644 index 0000000..99f09f6 --- /dev/null +++ b/blog/themes/harbor/.prettierrc.js @@ -0,0 +1,13 @@ +module.exports = { + "trailingComma": "es5", + "semi": false, + "singleQuote": true, + "overrides": [ + { + "files": ["*.html"], + "options": { + "parser": "go-template" + } + } + ] +}; diff --git a/blog/themes/harbor/LICENSE b/blog/themes/harbor/LICENSE new file mode 100644 index 0000000..faff36e --- /dev/null +++ b/blog/themes/harbor/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2020 YOUR_NAME_HERE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/blog/themes/harbor/README.md b/blog/themes/harbor/README.md new file mode 100644 index 0000000..2486422 --- /dev/null +++ b/blog/themes/harbor/README.md @@ -0,0 +1,241 @@ +# [harbor](https://themes.gohugo.io/harbor/) - Simple Hugo Theme + +[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=matsuyoshi30_harbor&metric=alert_status)](https://sonarcloud.io/dashboard?id=matsuyoshi30_harbor) +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fmatsuyoshi30%2Fharbor.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fmatsuyoshi30%2Fharbor?ref=badge_shield) + +Simple and minimal personal blog theme for [Hugo](https://gohugo.io/). + +![screenshot](https://user-images.githubusercontent.com/16238709/77252732-3698c880-6c99-11ea-9def-15a5f9b918bc.png) + +![screenshot-dark](https://user-images.githubusercontent.com/16238709/77252745-529c6a00-6c99-11ea-95f6-2df83dfff35e.png) + +[Here](https://themes.gohugo.io/theme/harbor/) is the demo link. + +## Features + +- Support tags, categories and archives +- Analytics integration (Google or Goatcounter) +- Responsive +- Dark mode +- Syntax Highlight (see [Hugo doc](https://gohugo.io/content-management/syntax-highlighting/)) +- Search entire blog posts +- Table Of Contents +- Disqus +- Most social media + +## Installation & Update + +``` +$ # install +$ cd themes +$ git submodule add https://github.com/matsuyoshi30/harbor.git harbor + +$ # update +$ git submodule update --remote --merge +``` + +If you want to know more information, see [Hugo doc](https://gohugo.io/themes/installing/). 
+ +## Usage + +### `config.toml` example + +```toml +theme = "harbor" +baseurl = "https://example.com/" +title = "Hugo Themes" +paginate = 3 +languageCode = "en" +DefaultContentLanguage = "en" +enableInlineShortcodes = true +footnoteReturnLinkContents = "^" + +# Optional +# If you use googleAnalytics, you set top-level options in config.toml to the beginning of the config file like other top-level options. +googleAnalytics = "UA-XXXXXXXX-XX" +# and disqus too. +disqusShortName = "yourdisqusshortname" + +[params.goatcounter] + domain="stats.domain.com" + +[Author] + name = "Hugo Author" + +[outputs] + section = ["JSON", "HTML"] + +[[params.nav]] + identifier = "about" + name = "About" + icon = "fas fa-user fa-lg" + url = "/about/" + weight = 3 + +[[params.nav]] + identifier = "tags" + name = "Tags" + icon = "fas fa-tag fa-lg" + url = "tags" + weight = 3 + +[[params.nav]] + identifier = "categories" + name = "Category" + icon = "fas fa-folder-open fa-lg" + url = "categories" + weight = 3 + +[[params.nav]] + identifier = "search" + name = "Search" + icon = "fas fa-search fa-lg" + url = "search" + weight = 3 + +[[params.nav]] + identifier = "archives" + name = "Archives" + icon = "fas fa-archive fa-lg" + url = "archives" + weight = 3 + +# copy paste this block and change for each social media to add how many ever social media +# acounts/links you want +[[params.social]] + name="name of social media" + url="link to social media" + icon="A icon from https://fontawesome.com/" + +[params.logo] + url = "icon.png" # static/images/icon.png + width = 50 + height = 50 + alt = "Logo" +``` + +Before you user my theme, don't remember to change favicon (static/favicon.ico) and icon (static/images/icon.png)! + +If you don't change them, your favicon and icon are my face :) + +### Search entire blog posts + +You should make `search.md` in the `page` directory. 
+ +``` +--- +title: "Search" +--- + +{{}} +``` + +### TOC + +If you want to use TableOfContent, you need to write words greater than 400, and set `true` of the frontmatter `toc`. + +### Back To Top Button + +If you want to use Back To Top Button, you need to write words greater than 400, and set `true` of the frontmatter `backtotop`. + +### Archives + +If you want archive page, generate `archive.md` file in the `content` directory. + +``` +$ hugo new archives.md +``` + +``` ++++ +title: "Archive page" +type: myarchivetype ++++ +``` + +### Override CSS + +If you want to override CSS, add `customCSS` param which is path to CSS file to your config.toml. + +``` +[params] + customCSS = ["/css/custom.css"] # in case you use `/static/css/custom.css` +``` + +### Enable Google Analytics when running as server + +If you want to enable google analytics when running hugo as server, add `enableGoogleAnalytics` param to your config.toml. + +``` +[params] + enableGoogleAnalytics = true +``` + +### Enable UglyURLs + +If you want to enable "Ugly URLs" (e.g. exmaple.com/urls.html), add `uglyurls = true` to top level and [params] of your config.toml. + +``` +uglyurls = true + +[params] + uglyurls = true +``` + +## Frontmatter example + +``` ++++ +title = "Article title here" +date = 2020-02-15T20:00:00+09:00 +tags = ["tags here"] +draft = false +toc = false +backtotop = false +disable_comments = true ++++ + +# Title + + + +## Contents + +``` + +## Contribution + +**Issues and PRs are very welcome!** + +### Development + +If you touch CSS or JavaScript file, you need to build to add your changes following below steps. + +1. Install Node.js and npm (Node.js package manager). + +2. The package.json file in your new sub-theme contains the versions of all the Node.js software you need. + To install them run: + + ``` + $ npm install + ``` + +3. 
After fixing files in `static` dir, run `build` command to generate `bundle.js` + + ``` + $ # for development version + $ npm run build-dev + + $ # for production version + $ npm run build-prod + ``` + +## LICENSE + +[MIT](./LICENSE). + +[![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fmatsuyoshi30%2Fharbor.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fmatsuyoshi30%2Fharbor?ref=badge_large) + +## Author + +[matsuyoshi30](https://twitter.com/matsuyoshi30) diff --git a/blog/themes/harbor/archetypes/default.md b/blog/themes/harbor/archetypes/default.md new file mode 100644 index 0000000..55dad13 --- /dev/null +++ b/blog/themes/harbor/archetypes/default.md @@ -0,0 +1,14 @@ ++++ +title = "{{ replace .TranslationBaseName "-" " " | title }}" +date = {{ .Date }} +tags = [""] +draft = false +toc = false +backtotop = false ++++ + +# Title + + + +## Contents \ No newline at end of file diff --git a/blog/themes/harbor/exampleSite/config.toml b/blog/themes/harbor/exampleSite/config.toml new file mode 100644 index 0000000..a5fa52b --- /dev/null +++ b/blog/themes/harbor/exampleSite/config.toml @@ -0,0 +1,58 @@ +baseURL = "https://example.org/" +title = "Hugo Themes" +author = "Steve Francia" +paginate = 3 +languageCode = "en" +DefaultContentLanguage = "en" +enableInlineShortcodes = true +footnoteReturnLinkContents = "^" +# REMOVE THIS +themesDir = "../../" +# DO NOT REMOVE THIS +theme = "harbor" + +[Author] + name = "hugo author" + +[permalinks] + post = "/:year/:month/:day/:filename/" + page = "/:filename/" + +[outputs] + section = ["JSON", "HTML"] + +[[params.nav]] + identifier = "about" + name = "About" + icon = "fas fa-user fa-lg" + url = "/about/" + weight = 3 + +[[params.nav]] + identifier = "tags" + name = "Tags" + icon = "fas fa-tag fa-lg" + url = "/tags/" + weight = 3 + +[[params.nav]] + identifier = "search" + name = "Search" + icon = "fas fa-search fa-lg" + url = "/search/" + weight = 3 + +[[params.social]] + name= "twitter" + 
url="https://twitter.com/GoHugoIO" + icon="fab fa-twitter" + +[params] + mainSections = ["post"] + favicon = "favicon.ico" + + [params.logo] + url = "icon.png" + width = 50 + height = 50 + alt = "Logo" \ No newline at end of file diff --git a/blog/themes/harbor/exampleSite/content/archives.md b/blog/themes/harbor/exampleSite/content/archives.md new file mode 100644 index 0000000..5916a39 --- /dev/null +++ b/blog/themes/harbor/exampleSite/content/archives.md @@ -0,0 +1,4 @@ +--- +title: "Archives" +type: archives +--- \ No newline at end of file diff --git a/blog/themes/harbor/exampleSite/content/page/about.md b/blog/themes/harbor/exampleSite/content/page/about.md new file mode 100644 index 0000000..c2ba680 --- /dev/null +++ b/blog/themes/harbor/exampleSite/content/page/about.md @@ -0,0 +1,7 @@ +--- +title: 'Our Difference' +button: 'About us' +weight: 2 +--- + +Lorem ipsum dolor sit amet, et essent mediocritatem quo, choro volumus oporteat an mei. ipsum dolor sit amet, et essent mediocritatem quo, \ No newline at end of file diff --git a/blog/themes/harbor/exampleSite/content/page/search.md b/blog/themes/harbor/exampleSite/content/page/search.md new file mode 100644 index 0000000..494916f --- /dev/null +++ b/blog/themes/harbor/exampleSite/content/page/search.md @@ -0,0 +1,5 @@ +--- +title: "Search" +--- + +{{}} \ No newline at end of file diff --git a/blog/themes/harbor/exampleSite/content/post/emoji-support.md b/blog/themes/harbor/exampleSite/content/post/emoji-support.md new file mode 100644 index 0000000..ecf6c86 --- /dev/null +++ b/blog/themes/harbor/exampleSite/content/post/emoji-support.md @@ -0,0 +1,47 @@ ++++ +author = "Hugo Authors" +title = "Emoji Support" +date = "2019-03-05" +description = "Guide to emoji usage in Hugo" +tags = [ + "emoji", +] ++++ + +Emoji can be enabled in a Hugo project in a number of ways. 
+ +The [`emojify`](https://gohugo.io/functions/emojify/) function can be called directly in templates or [Inline Shortcodes](https://gohugo.io/templates/shortcode-templates/#inline-shortcodes). + +To enable emoji globally, set `enableEmoji` to `true` in your site’s [configuration](https://gohugo.io/getting-started/configuration/) and then you can type emoji shorthand codes directly in content files; e.g. + + +

🙈 :see_no_evil: 🙉 :hear_no_evil: 🙊 :speak_no_evil:

+
+ +The [Emoji cheat sheet](http://www.emoji-cheat-sheet.com/) is a useful reference for emoji shorthand codes. + +*** + +**N.B.** The above steps enable Unicode Standard emoji characters and sequences in Hugo, however the rendering of these glyphs depends on the browser and the platform. To style the emoji you can either use a third party emoji font or a font stack; e.g. + +{{< highlight html >}} +.emoji { +font-family: Apple Color Emoji,Segoe UI Emoji,NotoColorEmoji,Segoe UI Symbol,Android Emoji,EmojiSymbols; +} +{{< /highlight >}} + +{{< css.inline >}} + +{{< /css.inline >}} \ No newline at end of file diff --git a/blog/themes/harbor/exampleSite/content/post/markdown-syntax.md b/blog/themes/harbor/exampleSite/content/post/markdown-syntax.md new file mode 100644 index 0000000..d60c404 --- /dev/null +++ b/blog/themes/harbor/exampleSite/content/post/markdown-syntax.md @@ -0,0 +1,147 @@ ++++ +author = "Hugo Authors" +title = "Markdown Syntax Guide" +date = "2019-03-11" +description = "Sample article showcasing basic Markdown syntax and formatting for HTML elements." +tags = [ + "markdown", + "css", + "html", + "themes", +] +categories = [ + "themes", + "syntax", +] +series = ["Themes Guide"] +aliases = ["migrate-from-jekyl"] ++++ + +This article offers a sample of basic Markdown syntax that can be used in Hugo content files, also it shows whether basic HTML elements are decorated with CSS in a Hugo theme. + + +## Headings + +The following HTML `

`—`

` elements represent six levels of section headings. `

` is the highest section level while `

` is the lowest. + +# H1 +## H2 +### H3 +#### H4 +##### H5 +###### H6 + +## Paragraph + +Xerum, quo qui aut unt expliquam qui dolut labo. Aque venitatiusda cum, voluptionse latur sitiae dolessi aut parist aut dollo enim qui voluptate ma dolestendit peritin re plis aut quas inctum laceat est volestemque commosa as cus endigna tectur, offic to cor sequas etum rerum idem sintibus eiur? Quianimin porecus evelectur, cum que nis nust voloribus ratem aut omnimi, sitatur? Quiatem. Nam, omnis sum am facea corem alique molestrunt et eos evelece arcillit ut aut eos eos nus, sin conecerem erum fuga. Ri oditatquam, ad quibus unda veliamenimin cusam et facea ipsamus es exerum sitate dolores editium rerore eost, temped molorro ratiae volorro te reribus dolorer sperchicium faceata tiustia prat. + +Itatur? Quiatae cullecum rem ent aut odis in re eossequodi nonsequ idebis ne sapicia is sinveli squiatum, core et que aut hariosam ex eat. + +## Blockquotes + +The blockquote element represents content that is quoted from another source, optionally with a citation which must be within a `footer` or `cite` element, and optionally with in-line changes such as annotations and abbreviations. + +#### Blockquote without attribution + +> Tiam, ad mint andaepu dandae nostion secatur sequo quae. +> **Note** that you can use *Markdown syntax* within a blockquote. + +#### Blockquote with attribution + +> Don't communicate by sharing memory, share memory by communicating.

+> — Rob Pike[^1] + + +[^1]: The above quote is excerpted from Rob Pike's [talk](https://www.youtube.com/watch?v=PAAkCSZUG1c) during Gopherfest, November 18, 2015. + +## Tables + +Tables aren't part of the core Markdown spec, but Hugo supports supports them out-of-the-box. + + Name | Age +--------|------ + Bob | 27 + Alice | 23 + +#### Inline Markdown within tables + +| Inline    | Markdown    | In    | Table | +| ---------- | --------- | ----------------- | ---------- | +| *italics* | **bold** | ~~strikethrough~~    | `code` | + +## Code Blocks + +#### Code block with backticks + +``` +html + + + + + Example HTML5 Document + + +

Test

+ + +``` +#### Code block indented with four spaces + + + + + + Example HTML5 Document + + +

Test

+ + + +#### Code block with Hugo's internal highlight shortcode +{{< highlight html >}} + + + + + Example HTML5 Document + + +

Test

+ + +{{< /highlight >}} + +## List Types + +#### Ordered List + +1. First item +2. Second item +3. Third item + +#### Unordered List + +* List item +* Another item +* And another item + +#### Nested list + +* Item +1. First Sub-item +2. Second Sub-item + +## Other Elements — abbr, sub, sup, kbd, mark + +GIF is a bitmap image format. + +H2O + +Xn + Yn = Zn + +Press CTRL+ALT+Delete to end the session. + +Most salamanders are nocturnal, and hunt for insects, worms, and other small creatures. + diff --git a/blog/themes/harbor/exampleSite/content/post/math-typesetting.mmark b/blog/themes/harbor/exampleSite/content/post/math-typesetting.mmark new file mode 100644 index 0000000..7f421ae --- /dev/null +++ b/blog/themes/harbor/exampleSite/content/post/math-typesetting.mmark @@ -0,0 +1,46 @@ +--- +author: Hugo Authors +title: Math Typesetting +date: 2019-03-08 +description: A brief guide to setup KaTeX +markup: mmark +math: true +--- + +Mathematical notation in a Hugo project can be enabled by using third party JavaScript libraries. + + +In this example we will be using [KaTeX](https://katex.org/) + +- Create a partial under `/layouts/partials/math.html` +- Within this partial reference the [Auto-render Extension](https://katex.org/docs/autorender.html) or host these scripts locally. +- Include the partial in your templates like so: + +``` +{{ if or .Params.math .Site.Params.math }} +{{ partial "math.html" . }} +{{ end }} +``` +- To enable KaTex globally set the parameter `math` to `true` in a project's configuration +- To enable KaTex on a per page basis include the parameter `math: true` in content files. 
+ +**Note:** Use the online reference of [Supported TeX Functions](https://katex.org/docs/supported.html) +{{< math.inline >}} +{{ if or .Page.Params.math .Site.Params.math }} + + + + +{{ end }} +{{}} + +### Examples + +Inline math: $$ \varphi = \dfrac{1+\sqrt5}{2}= 1.6180339887… $$ + +Block math: + +$$ + \varphi = 1+\frac{1} {1+\frac{1} {1+\frac{1} {1+\cdots} } } +$$ + diff --git a/blog/themes/harbor/exampleSite/content/post/placeholder-text.md b/blog/themes/harbor/exampleSite/content/post/placeholder-text.md new file mode 100644 index 0000000..378b995 --- /dev/null +++ b/blog/themes/harbor/exampleSite/content/post/placeholder-text.md @@ -0,0 +1,58 @@ ++++ +author = "Hugo Authors" +title = "Placeholder Text" +date = "2019-03-09" +description = "Lorem Ipsum Dolor Si Amet" +tags = [ + "markdown", + "text", +] ++++ + +Lorem est tota propiore conpellat pectoribus de +pectora summo. Redit teque digerit hominumque toris verebor lumina non cervice +subde tollit usus habet Arctonque, furores quas nec ferunt. Quoque montibus nunc +caluere tempus inhospita parcite confusaque translucet patri vestro qui optatis +lumine cognoscere flos nubis! Fronde ipsamque patulos Dryopen deorum. + +1. Exierant elisi ambit vivere dedere +2. Duce pollice +3. Eris modo +4. Spargitque ferrea quos palude + +Rursus nulli murmur; hastile inridet ut ab gravi sententia! Nomine potitus +silentia flumen, sustinet placuit petis in dilapsa erat sunt. Atria +tractus malis. + +1. Comas hunc haec pietate fetum procerum dixit +2. Post torum vates letum Tiresia +3. Flumen querellas +4. Arcanaque montibus omnes +5. Quidem et + +# Vagus elidunt + + + +[The Van de Graaf Canon](https://en.wikipedia.org/wiki/Canons_of_page_construction#Van_de_Graaf_canon) + +## Mane refeci capiebant unda mulcebat + +Victa caducifer, malo vulnere contra +dicere aurato, ludit regale, voca! Retorsit colit est profanae esse virescere +furit nec; iaculi matertera et visa est, viribus. 
Divesque creatis, tecta novat collumque vulnus est, parvas. **Faces illo pepulere** tempus adest. Tendit flamma, ab opes virum sustinet, sidus sequendo urbis. + +Iubar proles corpore raptos vero auctor imperium; sed et huic: manus caeli +Lelegas tu lux. Verbis obstitit intus oblectamina fixis linguisque ausus sperare +Echionides cornuaque tenent clausit possit. Omnia putatur. Praeteritae refert +ausus; ferebant e primus lora nutat, vici quae mea ipse. Et iter nil spectatae +vulnus haerentia iuste et exercebat, sui et. + +Eurytus Hector, materna ipsumque ut Politen, nec, nate, ignari, vernum cohaesit sequitur. Vel **mitis temploque** vocatus, inque alis, *oculos nomen* non silvis corpore coniunx ne displicet illa. Crescunt non unus, vidit visa quantum inmiti flumina mortis facto sic: undique a alios vincula sunt iactata abdita! Suspenderat ego fuit tendit: luna, ante urbem +Propoetides **parte**. + +{{< css.inline >}} + +{{< /css.inline >}} diff --git a/blog/themes/harbor/exampleSite/content/post/rich-content.md b/blog/themes/harbor/exampleSite/content/post/rich-content.md new file mode 100644 index 0000000..407c539 --- /dev/null +++ b/blog/themes/harbor/exampleSite/content/post/rich-content.md @@ -0,0 +1,34 @@ ++++ +author = "Hugo Authors" +title = "Rich Content" +date = "2019-03-10" +description = "A brief description of Hugo Shortcodes" +tags = [ + "shortcodes", + "privacy", +] ++++ + +Hugo ships with several [Built-in Shortcodes](https://gohugo.io/content-management/shortcodes/#use-hugo-s-built-in-shortcodes) for rich content, along with a [Privacy Config](https://gohugo.io/about/hugo-and-gdpr/) and a set of Simple Shortcodes that enable static and no-JS versions of various social media embeds. + +--- + +## YouTube Privacy Enhanced Shortcode + +{{< youtube ZJthWmvUzzc >}} + +
+ +--- + +## Twitter Simple Shortcode + +{{< twitter_simple 1085870671291310081 >}} + +
+ +--- + +## Vimeo Simple Shortcode + +{{< vimeo_simple 48912912 >}} diff --git a/blog/themes/harbor/i18n/de.toml b/blog/themes/harbor/i18n/de.toml new file mode 100644 index 0000000..cfb5e8d --- /dev/null +++ b/blog/themes/harbor/i18n/de.toml @@ -0,0 +1,12 @@ +[older] +other = "Ältere Beiträge" +[newer] +other = "Neuere Beiträge" +[read-more] +other = "Weiterlesen" +[search] +other = "Suchen..." +[powered-by] +other = "Powered By" +[theme] +other = "Theme" diff --git a/blog/themes/harbor/i18n/en.toml b/blog/themes/harbor/i18n/en.toml new file mode 100644 index 0000000..4389025 --- /dev/null +++ b/blog/themes/harbor/i18n/en.toml @@ -0,0 +1,12 @@ +[older] +other = "Older" +[newer] +other = "Newer" +[read-more] +other = "Read More" +[search] +other = "Search..." +[powered-by] +other = "Powered By" +[theme] +other = "Theme" \ No newline at end of file diff --git a/blog/themes/harbor/i18n/fr.toml b/blog/themes/harbor/i18n/fr.toml new file mode 100644 index 0000000..d297355 --- /dev/null +++ b/blog/themes/harbor/i18n/fr.toml @@ -0,0 +1,12 @@ +[older] +other = "Suivant" +[newer] +other = "Précédent" +[read-more] +other = "Lire" +[search] +other = "Recherche..." +[powered-by] +other = "Propulsé par" +[theme] +other = "thème" \ No newline at end of file diff --git a/blog/themes/harbor/i18n/ko.toml b/blog/themes/harbor/i18n/ko.toml new file mode 100644 index 0000000..027de5f --- /dev/null +++ b/blog/themes/harbor/i18n/ko.toml @@ -0,0 +1,12 @@ +[older] +other = "과거" +[newer] +other = "최신" +[read-more] +other = "계속 읽기" +[search] +other = "검색..." +[powered-by] +other = "제공 : " +[theme] +other = "테마" \ No newline at end of file diff --git a/blog/themes/harbor/i18n/pt.toml b/blog/themes/harbor/i18n/pt.toml new file mode 100644 index 0000000..c697558 --- /dev/null +++ b/blog/themes/harbor/i18n/pt.toml @@ -0,0 +1,12 @@ +[older] +other = "Mais antigos" +[newer] +other = "Mais novos" +[read-more] +other = "Ler mais" +[search] +other = "Buscar..." 
+[powered-by] +other = "Desenvolvido com" +[theme] +other = "tema" \ No newline at end of file diff --git a/blog/themes/harbor/images/screenshot-dark.png b/blog/themes/harbor/images/screenshot-dark.png new file mode 100644 index 0000000..844a79e Binary files /dev/null and b/blog/themes/harbor/images/screenshot-dark.png differ diff --git a/blog/themes/harbor/images/screenshot.png b/blog/themes/harbor/images/screenshot.png new file mode 100644 index 0000000..0d913ef Binary files /dev/null and b/blog/themes/harbor/images/screenshot.png differ diff --git a/blog/themes/harbor/images/tn.png b/blog/themes/harbor/images/tn.png new file mode 100644 index 0000000..3d82027 Binary files /dev/null and b/blog/themes/harbor/images/tn.png differ diff --git a/blog/themes/harbor/layouts/404.html b/blog/themes/harbor/layouts/404.html new file mode 100644 index 0000000..ae96c48 --- /dev/null +++ b/blog/themes/harbor/layouts/404.html @@ -0,0 +1 @@ +{{ define "main" }} {{ end }} diff --git a/blog/themes/harbor/layouts/_default/author.html b/blog/themes/harbor/layouts/_default/author.html new file mode 100644 index 0000000..6e2e533 --- /dev/null +++ b/blog/themes/harbor/layouts/_default/author.html @@ -0,0 +1,80 @@ + + + + {{ partial "head.html" . }} + + + + + +
+

{{ .Params.name }}

+
+ {{ .Content }} +
+
+
+

Articles

+
    + {{ range .Pages }} + {{ partial "h3preview.html" . }} + {{ end }} +
+
+ {{ partial "footer.html" }} + + diff --git a/blog/themes/harbor/layouts/_default/author.terms.html b/blog/themes/harbor/layouts/_default/author.terms.html new file mode 100644 index 0000000..8ab86b0 --- /dev/null +++ b/blog/themes/harbor/layouts/_default/author.terms.html @@ -0,0 +1,13 @@ + + + + {{ partial "head.html" . }} + + + {{ partial "header.html" . }} + {{ range .Data.Pages }} +

{{ .Params.name }}

+ {{end}} + {{ partial "footer.html" . }} + + diff --git a/blog/themes/harbor/layouts/_default/baseof.html b/blog/themes/harbor/layouts/_default/baseof.html new file mode 100644 index 0000000..341c40a --- /dev/null +++ b/blog/themes/harbor/layouts/_default/baseof.html @@ -0,0 +1,11 @@ + + + + {{ partial "head.html" . }} + + + {{ partial "header.html" . }} + {{ block "main" . }}{{ end }} + {{ partial "footer.html" . }} + + diff --git a/blog/themes/harbor/layouts/_default/list.html b/blog/themes/harbor/layouts/_default/list.html new file mode 100644 index 0000000..1d02e1c --- /dev/null +++ b/blog/themes/harbor/layouts/_default/list.html @@ -0,0 +1,45 @@ +{{ define "main" }} +
+
+ {{ if .IsHome }} + {{ $pag := .Paginate (where site.RegularPages "Type" "in" site.Params.mainSections) }} + {{ else }} + {{ $pag := .Paginator.Pages }} + {{ end }} + + {{ range .Paginator.Pages }} + {{ partial "preview.html" . }} + {{ end }} +
+ {{ if or (.Paginator.HasPrev) (.Paginator.HasNext) }} + + {{ end }} +
+{{ end }} diff --git a/blog/themes/harbor/layouts/_default/list.json b/blog/themes/harbor/layouts/_default/list.json new file mode 100644 index 0000000..6c434ef --- /dev/null +++ b/blog/themes/harbor/layouts/_default/list.json @@ -0,0 +1,13 @@ +[ + {{ range $index, $page := .Pages }}{{ if ne $index 0 }},{{ end }} + {{ if eq .Type "post" }} + { + "ref": "{{ $page.Permalink }}", + "title": {{ $page.Title | jsonify }}, + "section": "{{ $page.Section }}", + "date" : {{ $page.Date.Format "2006.01.02" | jsonify }}, + "body": {{ $page.Plain | jsonify }} + } + {{ end }} + {{ end }} +] \ No newline at end of file diff --git a/blog/themes/harbor/layouts/_default/rss.xml b/blog/themes/harbor/layouts/_default/rss.xml new file mode 100644 index 0000000..08815d9 --- /dev/null +++ b/blog/themes/harbor/layouts/_default/rss.xml @@ -0,0 +1,25 @@ + + + {{ with .Title }}{{.}} on {{ end }}{{ .Site.Title }} + {{ .Permalink }} + Recent content {{ with .Title }}in {{.}} {{ end }} + Hugo -- gohugo.io + {{ with .Site.LanguageCode }} + {{.}} + {{end}} + {{.}}{{ with $.Site.Author.name }} ({{.}}){{end}} + {{ if not .Date.IsZero }} + {{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }} + {{ end }} + + {{ range first 10 (where (where .Site.Pages ".Section" "post") "Kind" "page") }} + + {{ .Title }} + {{ .Permalink }} + {{ .Date.Format "Mon, 02 Jan 2006 15:04:05 -0700" | safeHTML }} + {{ .Permalink }} + {{ .Summary | html }} + + {{ end }} + + diff --git a/blog/themes/harbor/layouts/_default/single.html b/blog/themes/harbor/layouts/_default/single.html new file mode 100644 index 0000000..8784d10 --- /dev/null +++ b/blog/themes/harbor/layouts/_default/single.html @@ -0,0 +1,34 @@ +{{ define "main" }} +
+
+
+

By: + {{ range .Param "author" }} + + {{ . }} + + {{ end }} +

+
+ {{ partial "toc.html" . }} + + {{ if .Params.tags }} +
+ {{ range .Params.tags }} + {{ . }}  + {{ end }} +
+ {{ end }} +
+ {{ if and (gt .WordCount 400) (.Param "backtotop") }} + {{ partial "backtotop.html" . }} + + {{ end }} + {{ if (not (isset .Params "disable_comments")) }} + {{ partial "disqus.html" . }} + {{ end }} +
+{{ end }} diff --git a/blog/themes/harbor/layouts/_default/terms.html b/blog/themes/harbor/layouts/_default/terms.html new file mode 100644 index 0000000..5d2d7b4 --- /dev/null +++ b/blog/themes/harbor/layouts/_default/terms.html @@ -0,0 +1,14 @@ +{{ define "main" }} +
+
+ {{ range .Data.Terms.Alphabetical }} +
+

+ {{ .Page.Title }} + {{ .Count }} +

+
+ {{ end }} +
+
+{{ end }} diff --git a/blog/themes/harbor/layouts/archives/single.html b/blog/themes/harbor/layouts/archives/single.html new file mode 100644 index 0000000..d7cc773 --- /dev/null +++ b/blog/themes/harbor/layouts/archives/single.html @@ -0,0 +1,32 @@ + + + + {{ partial "head.html" . }} + + + {{ partial "header.html" . }} +
+
+ + + {{ range (where (where .Site.Pages ".Section" "post") "Kind" "page") }} + + + + + {{ end }} + +
+ {{ .Date | dateFormat "2006" }} {{ .Date | dateFormat "Jan" }} {{ .Date | dateFormat "2" }} + +

+ {{ .Title }} +

+
+
+
+ {{ partial "footer.html" . }} + + diff --git a/blog/themes/harbor/layouts/partials/analytics.html b/blog/themes/harbor/layouts/partials/analytics.html new file mode 100644 index 0000000..16a4444 --- /dev/null +++ b/blog/themes/harbor/layouts/partials/analytics.html @@ -0,0 +1,17 @@ +{{ if or ( .Site.Params.enableGoogleAnalytics ) ( not .Site.IsServer ) }} + {{ with .Site.GoogleAnalytics }} + + + + {{ end }} +{{ end }} +{{ if and (.Site.Params.goatcounter) ( not .Site.IsServer ) }} + {{ $domain := .Site.Params.goatcounter.domain }} + +{{ end }} diff --git a/blog/themes/harbor/layouts/partials/backtotop.html b/blog/themes/harbor/layouts/partials/backtotop.html new file mode 100644 index 0000000..c674999 --- /dev/null +++ b/blog/themes/harbor/layouts/partials/backtotop.html @@ -0,0 +1,19 @@ + diff --git a/blog/themes/harbor/layouts/partials/disqus.html b/blog/themes/harbor/layouts/partials/disqus.html new file mode 100644 index 0000000..47f16dc --- /dev/null +++ b/blog/themes/harbor/layouts/partials/disqus.html @@ -0,0 +1,26 @@ +{{ with .Site.DisqusShortname }} +
+ + + comments powered by Disqus +{{ end }} diff --git a/blog/themes/harbor/layouts/partials/footer.html b/blog/themes/harbor/layouts/partials/footer.html new file mode 100644 index 0000000..db025c8 --- /dev/null +++ b/blog/themes/harbor/layouts/partials/footer.html @@ -0,0 +1,30 @@ + diff --git a/blog/themes/harbor/layouts/partials/h3preview.html b/blog/themes/harbor/layouts/partials/h3preview.html new file mode 100644 index 0000000..e9fc87b --- /dev/null +++ b/blog/themes/harbor/layouts/partials/h3preview.html @@ -0,0 +1,15 @@ +
+ +

{{ .Title }}

+
+
+

{{ .Summary }}

+ {{ i18n "read-more" }} +
+ + +
diff --git a/blog/themes/harbor/layouts/partials/head.html b/blog/themes/harbor/layouts/partials/head.html new file mode 100644 index 0000000..412dd3d --- /dev/null +++ b/blog/themes/harbor/layouts/partials/head.html @@ -0,0 +1,45 @@ + +{{ .Title }} +{{ with .Site.Params.description }} + +{{ end }} +{{- with .Site.Author.Name }} + +{{- end }} +{{ partial "analytics.html" . }} + + + +{{ template "_internal/twitter_cards.html" . }} +{{ if isset .Site.Taxonomies "series" }} + {{ template "_internal/opengraph.html" . }} +{{ end }} + + +{{ range .Site.Params.customCSS }} + +{{ end }} + + + + + + +{{ hugo.Generator -}} diff --git a/blog/themes/harbor/layouts/partials/header.html b/blog/themes/harbor/layouts/partials/header.html new file mode 100644 index 0000000..9eeda2d --- /dev/null +++ b/blog/themes/harbor/layouts/partials/header.html @@ -0,0 +1,34 @@ +{{ if .IsHome }} + {{ if .Site.Params.homeTitle }} + {{ $.Scratch.Set "title" .Site.Params.homeTitle }} + {{ else }} + {{ $.Scratch.Set "title" .Site.Title }} + {{ end }} + {{ else }} + {{ $.Scratch.Set "title" .Title }} +{{ end }} + +{{ $title := $.Scratch.Get "title" }} +{{ if $title }} +
+ {{ partial "nav.html" . }} +
+
+
+ {{ if eq .Type "list" }} +

{{ if .Data.Singular }}#{{ end }}{{ .Title }}

+ {{ else }} +

+ {{ with $title }}{{ . }}{{ else }}
{{ end }} +

+ {{ end }} + {{ if eq .Type "post" }} + {{ partial "postmeta.html" . }} + {{ end }} +
+
+
+
+ {{ else }} +
+{{ end }} diff --git a/blog/themes/harbor/layouts/partials/nav.html b/blog/themes/harbor/layouts/partials/nav.html new file mode 100644 index 0000000..f8eba73 --- /dev/null +++ b/blog/themes/harbor/layouts/partials/nav.html @@ -0,0 +1,30 @@ + diff --git a/blog/themes/harbor/layouts/partials/postmeta.html b/blog/themes/harbor/layouts/partials/postmeta.html new file mode 100644 index 0000000..bb36e3a --- /dev/null +++ b/blog/themes/harbor/layouts/partials/postmeta.html @@ -0,0 +1,11 @@ + +  {{ .Lastmod.Format ( .Site.Params.dateformat | default "Jan 2, 2006") }} + {{ if .Params.categories }} +      + {{ range .Params.categories }} + {{ . }}  + {{ end }} + {{ end }} + diff --git a/blog/themes/harbor/layouts/partials/preview.html b/blog/themes/harbor/layouts/partials/preview.html new file mode 100644 index 0000000..dacf843 --- /dev/null +++ b/blog/themes/harbor/layouts/partials/preview.html @@ -0,0 +1,15 @@ +
+ +

{{ .Title }}

+
+
+

{{ .Summary }}

+ {{ i18n "read-more" }} +
+ + +
diff --git a/blog/themes/harbor/layouts/partials/social.html b/blog/themes/harbor/layouts/partials/social.html new file mode 100644 index 0000000..0cadffc --- /dev/null +++ b/blog/themes/harbor/layouts/partials/social.html @@ -0,0 +1,20 @@ +{{ if .Site.Params.social }} +
+ {{ range $social := .Site.Params.social }} + + {{ end }} +{{ end }} +{{ if .Site.Params.social_static }} + {{ $base := .Site.BaseURL }} + {{ range $social := .Site.Params.social_static }} + + {{end}} +
+{{ end }} diff --git a/blog/themes/harbor/layouts/partials/toc.html b/blog/themes/harbor/layouts/partials/toc.html new file mode 100644 index 0000000..0289e2a --- /dev/null +++ b/blog/themes/harbor/layouts/partials/toc.html @@ -0,0 +1,18 @@ +{{ if and (gt .WordCount 400) (.Param "toc") }} + {{- /* 正規表現でh[1-6]を探す */ -}} + {{- $header := (findRE "(?:.|\n)*?" .Content) -}} + {{- /* 最初に出現するh[1-6]を取得 */ -}} + {{- $firstH := index $header 0 -}} + + {{- if ne $firstH nil -}} + {{- /* ヘッダーの前にToCを結合した「新しいヘッダー」を作成 */ -}} + {{- $newH := printf `%s%s` .TableOfContents $firstH -}} + {{- /* 古いヘッダーを新しいヘッダーに置換して出力 */ -}} + {{- replace .Content $firstH $newH | safeHTML -}} + {{- else -}} + {{- /* そもそもヘッダーがない時は普通に出力 */ -}} + {{- .Content -}} + {{- end -}} + {{ else }} + {{ .Content }} +{{ end }} diff --git a/blog/themes/harbor/layouts/shortcodes/search.html b/blog/themes/harbor/layouts/shortcodes/search.html new file mode 100644 index 0000000..87f8596 --- /dev/null +++ b/blog/themes/harbor/layouts/shortcodes/search.html @@ -0,0 +1,10 @@ +
+ +
+
+ diff --git a/blog/themes/harbor/package-lock.json b/blog/themes/harbor/package-lock.json new file mode 100644 index 0000000..5a34a40 --- /dev/null +++ b/blog/themes/harbor/package-lock.json @@ -0,0 +1,8079 @@ +{ + "name": "harbor", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "@babel/code-frame": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", + "integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==", + "dev": true, + "requires": { + "@babel/highlight": "^7.10.4" + } + }, + "@babel/helper-validator-identifier": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz", + "integrity": "sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw==", + "dev": true + }, + "@babel/highlight": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.10.4.tgz", + "integrity": "sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.10.4", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + }, + "dependencies": { + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + } + } + }, + "@eslint/eslintrc": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.2.1.tgz", + "integrity": "sha512-XRUeBZ5zBWLYgSANMpThFddrZZkEbGHgUdt5UJjZfnlN9BGCiUBrf+nvbRupSjMvqzwnQN0qwCmOxITt1cfywA==", + "dev": true, + "requires": { + "ajv": "^6.12.4", + "debug": "^4.1.1", + "espree": "^7.3.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + 
"import-fresh": "^3.2.1", + "js-yaml": "^3.13.1", + "lodash": "^4.17.19", + "minimatch": "^3.0.4", + "strip-json-comments": "^3.1.1" + }, + "dependencies": { + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "globals": { + "version": "12.4.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", + "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", + "dev": true, + "requires": { + "type-fest": "^0.8.1" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "@nodelib/fs.scandir": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.3.tgz", + "integrity": "sha512-eGmwYQn3gxo4r7jdQnkrrN6bY478C3P+a/y72IJukF8LjB6ZHeB3c+Ehacj3sYeSmUXGlnA67/PmbM9CVwL7Dw==", + "dev": true, + "requires": { + "@nodelib/fs.stat": "2.0.3", + "run-parallel": "^1.1.9" + } + }, + "@nodelib/fs.stat": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.3.tgz", + "integrity": "sha512-bQBFruR2TAwoevBEd/NWMoAAtNGzTRgdrqnYCc7dhzfoNvqPzLyqlEQnzZ3kVnNrSp25iyxE00/3h2fqGAGArA==", + "dev": true + }, + "@nodelib/fs.walk": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.4.tgz", + "integrity": "sha512-1V9XOY4rDW0rehzbrcqAmHnz8e7SKvX27gh8Gt2WgB0+pdzdiLV83p72kZPU+jvMbS1qU5mauP2iOvO8rhmurQ==", + "dev": true, + "requires": { + "@nodelib/fs.scandir": "2.1.3", + "fastq": "^1.6.0" + } + }, + "@types/eslint-visitor-keys": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/@types/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz", + "integrity": "sha512-OCutwjDZ4aFS6PB1UZ988C4YgwlBHJd6wCeQqaLdmadZ/7e+w79+hbMUFC1QXDNCmdyoRfAFdm0RypzwR+Qpag==", + "dev": true + }, + "@types/json-schema": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.6.tgz", + "integrity": "sha512-3c+yGKvVP5Y9TYBEibGNR+kLtijnj7mYrXRg+WpFb2X9xm04g/DXYkfg4hmzJQosc9snFNUPkbYIhu+KAm6jJw==", + "dev": true + }, + "@types/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", + "dev": true + }, + "@typescript-eslint/eslint-plugin": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-4.8.1.tgz", + "integrity": "sha512-d7LeQ7dbUrIv5YVFNzGgaW3IQKMmnmKFneRWagRlGYOSfLJVaRbj/FrBNOBC1a3tVO+TgNq1GbHvRtg1kwL0FQ==", + "dev": true, + "requires": { + "@typescript-eslint/experimental-utils": "4.8.1", + "@typescript-eslint/scope-manager": "4.8.1", + "debug": "^4.1.1", + "functional-red-black-tree": "^1.0.1", + "regexpp": "^3.0.0", + "semver": "^7.3.2", + "tsutils": "^3.17.1" + }, + "dependencies": { + "@typescript-eslint/experimental-utils": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-4.8.1.tgz", + "integrity": "sha512-WigyLn144R3+lGATXW4nNcDJ9JlTkG8YdBWHkDlN0lC3gUGtDi7Pe3h5GPvFKMcRz8KbZpm9FJV9NTW8CpRHpg==", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.3", + "@typescript-eslint/scope-manager": "4.8.1", + "@typescript-eslint/types": "4.8.1", + "@typescript-eslint/typescript-estree": "4.8.1", + "eslint-scope": "^5.0.0", + "eslint-utils": "^2.0.0" + } + }, + "@typescript-eslint/types": { + "version": "4.8.1", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/types/-/types-4.8.1.tgz", + "integrity": "sha512-ave2a18x2Y25q5K05K/U3JQIe2Av4+TNi/2YuzyaXLAsDx6UZkz1boZ7nR/N6Wwae2PpudTZmHFXqu7faXfHmA==", + "dev": true + }, + "@typescript-eslint/typescript-estree": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-4.8.1.tgz", + "integrity": "sha512-bJ6Fn/6tW2g7WIkCWh3QRlaSU7CdUUK52shx36/J7T5oTQzANvi6raoTsbwGM11+7eBbeem8hCCKbyvAc0X3sQ==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.8.1", + "@typescript-eslint/visitor-keys": "4.8.1", + "debug": "^4.1.1", + "globby": "^11.0.1", + "is-glob": "^4.0.1", + "lodash": "^4.17.15", + "semver": "^7.3.2", + "tsutils": "^3.17.1" + } + }, + "@typescript-eslint/visitor-keys": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-4.8.1.tgz", + "integrity": "sha512-3nrwXFdEYALQh/zW8rFwP4QltqsanCDz4CwWMPiIZmwlk9GlvBeueEIbq05SEq4ganqM0g9nh02xXgv5XI3PeQ==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.8.1", + "eslint-visitor-keys": "^2.0.0" + } + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + } + }, + "eslint-visitor-keys": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.0.0.tgz", + "integrity": 
"sha512-QudtT6av5WXels9WjIM7qz1XD1cWGvX4gGXvp/zBn9nXG02D0utdU3Em2m/QjTnrsk6bBjmCygl3rmj118msQQ==", + "dev": true + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "semver": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", + "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", + "dev": true + } + } + }, + "@typescript-eslint/experimental-utils": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-3.10.1.tgz", + "integrity": "sha512-DewqIgscDzmAfd5nOGe4zm6Bl7PKtMG2Ad0KG8CUZAHlXfAKTF9Ol5PXhiMh39yRL2ChRH1cuuUGOcVyyrhQIw==", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.3", + "@typescript-eslint/types": "3.10.1", + "@typescript-eslint/typescript-estree": "3.10.1", + "eslint-scope": "^5.0.0", + "eslint-utils": "^2.0.0" + }, + "dependencies": { + "eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + } + } + } + }, + "@typescript-eslint/parser": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-3.10.1.tgz", + "integrity": "sha512-Ug1RcWcrJP02hmtaXVS3axPPTTPnZjupqhgj+NnZ6BCkwSImWk/283347+x9wN+lqOdK9Eo3vsyiyDHgsmiEJw==", + "dev": true, + "requires": { + "@types/eslint-visitor-keys": "^1.0.0", + "@typescript-eslint/experimental-utils": "3.10.1", + "@typescript-eslint/types": "3.10.1", + "@typescript-eslint/typescript-estree": "3.10.1", + "eslint-visitor-keys": "^1.1.0" + } + }, + 
"@typescript-eslint/scope-manager": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-4.8.1.tgz", + "integrity": "sha512-r0iUOc41KFFbZdPAdCS4K1mXivnSZqXS5D9oW+iykQsRlTbQRfuFRSW20xKDdYiaCoH+SkSLeIF484g3kWzwOQ==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.8.1", + "@typescript-eslint/visitor-keys": "4.8.1" + }, + "dependencies": { + "@typescript-eslint/types": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-4.8.1.tgz", + "integrity": "sha512-ave2a18x2Y25q5K05K/U3JQIe2Av4+TNi/2YuzyaXLAsDx6UZkz1boZ7nR/N6Wwae2PpudTZmHFXqu7faXfHmA==", + "dev": true + }, + "@typescript-eslint/visitor-keys": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-4.8.1.tgz", + "integrity": "sha512-3nrwXFdEYALQh/zW8rFwP4QltqsanCDz4CwWMPiIZmwlk9GlvBeueEIbq05SEq4ganqM0g9nh02xXgv5XI3PeQ==", + "dev": true, + "requires": { + "@typescript-eslint/types": "4.8.1", + "eslint-visitor-keys": "^2.0.0" + } + }, + "eslint-visitor-keys": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.0.0.tgz", + "integrity": "sha512-QudtT6av5WXels9WjIM7qz1XD1cWGvX4gGXvp/zBn9nXG02D0utdU3Em2m/QjTnrsk6bBjmCygl3rmj118msQQ==", + "dev": true + } + } + }, + "@typescript-eslint/types": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-3.10.1.tgz", + "integrity": "sha512-+3+FCUJIahE9q0lDi1WleYzjCwJs5hIsbugIgnbB+dSCYUxl8L6PwmsyOPFZde2hc1DlTo/xnkOgiTLSyAbHiQ==", + "dev": true + }, + "@typescript-eslint/typescript-estree": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-3.10.1.tgz", + "integrity": "sha512-QbcXOuq6WYvnB3XPsZpIwztBoquEYLXh2MtwVU+kO8jgYCiv4G5xrSP/1wg4tkvrEE+esZVquIPX/dxPlePk1w==", + "dev": true, + "requires": { + "@typescript-eslint/types": 
"3.10.1", + "@typescript-eslint/visitor-keys": "3.10.1", + "debug": "^4.1.1", + "glob": "^7.1.6", + "is-glob": "^4.0.1", + "lodash": "^4.17.15", + "semver": "^7.3.2", + "tsutils": "^3.17.1" + }, + "dependencies": { + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "semver": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", + "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", + "dev": true + } + } + }, + "@typescript-eslint/visitor-keys": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-3.10.1.tgz", + "integrity": "sha512-9JgC82AaQeglebjZMgYR5wgmfUdUc+EitGUUMW8u2nDckaeimzW+VsoLV6FoimPv2id3VQzfjwBxEMVz08ameQ==", + "dev": true, + "requires": { + "eslint-visitor-keys": "^1.1.0" + } + }, + "@webassemblyjs/ast": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.8.5.tgz", + "integrity": "sha512-aJMfngIZ65+t71C3y2nBBg5FFG0Okt9m0XEgWZ7Ywgn1oMAT8cNwx00Uv1cQyHtidq0Xn94R4TAywO+LCQ+ZAQ==", + "dev": true, + "requires": { + "@webassemblyjs/helper-module-context": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/wast-parser": "1.8.5" + } + }, + "@webassemblyjs/floating-point-hex-parser": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.8.5.tgz", + "integrity": 
"sha512-9p+79WHru1oqBh9ewP9zW95E3XAo+90oth7S5Re3eQnECGq59ly1Ri5tsIipKGpiStHsUYmY3zMLqtk3gTcOtQ==", + "dev": true + }, + "@webassemblyjs/helper-api-error": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.8.5.tgz", + "integrity": "sha512-Za/tnzsvnqdaSPOUXHyKJ2XI7PDX64kWtURyGiJJZKVEdFOsdKUCPTNEVFZq3zJ2R0G5wc2PZ5gvdTRFgm81zA==", + "dev": true + }, + "@webassemblyjs/helper-buffer": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.8.5.tgz", + "integrity": "sha512-Ri2R8nOS0U6G49Q86goFIPNgjyl6+oE1abW1pS84BuhP1Qcr5JqMwRFT3Ah3ADDDYGEgGs1iyb1DGX+kAi/c/Q==", + "dev": true + }, + "@webassemblyjs/helper-code-frame": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.8.5.tgz", + "integrity": "sha512-VQAadSubZIhNpH46IR3yWO4kZZjMxN1opDrzePLdVKAZ+DFjkGD/rf4v1jap744uPVU6yjL/smZbRIIJTOUnKQ==", + "dev": true, + "requires": { + "@webassemblyjs/wast-printer": "1.8.5" + } + }, + "@webassemblyjs/helper-fsm": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.8.5.tgz", + "integrity": "sha512-kRuX/saORcg8se/ft6Q2UbRpZwP4y7YrWsLXPbbmtepKr22i8Z4O3V5QE9DbZK908dh5Xya4Un57SDIKwB9eow==", + "dev": true + }, + "@webassemblyjs/helper-module-context": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.8.5.tgz", + "integrity": "sha512-/O1B236mN7UNEU4t9X7Pj38i4VoU8CcMHyy3l2cV/kIF4U5KoHXDVqcDuOs1ltkac90IM4vZdHc52t1x8Yfs3g==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "mamacro": "^0.0.3" + } + }, + "@webassemblyjs/helper-wasm-bytecode": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.8.5.tgz", + "integrity": 
"sha512-Cu4YMYG3Ddl72CbmpjU/wbP6SACcOPVbHN1dI4VJNJVgFwaKf1ppeFJrwydOG3NDHxVGuCfPlLZNyEdIYlQ6QQ==", + "dev": true + }, + "@webassemblyjs/helper-wasm-section": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.8.5.tgz", + "integrity": "sha512-VV083zwR+VTrIWWtgIUpqfvVdK4ff38loRmrdDBgBT8ADXYsEZ5mPQ4Nde90N3UYatHdYoDIFb7oHzMncI02tA==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-buffer": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/wasm-gen": "1.8.5" + } + }, + "@webassemblyjs/ieee754": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.8.5.tgz", + "integrity": "sha512-aaCvQYrvKbY/n6wKHb/ylAJr27GglahUO89CcGXMItrOBqRarUMxWLJgxm9PJNuKULwN5n1csT9bYoMeZOGF3g==", + "dev": true, + "requires": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "@webassemblyjs/leb128": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.8.5.tgz", + "integrity": "sha512-plYUuUwleLIziknvlP8VpTgO4kqNaH57Y3JnNa6DLpu/sGcP6hbVdfdX5aHAV716pQBKrfuU26BJK29qY37J7A==", + "dev": true, + "requires": { + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/utf8": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.8.5.tgz", + "integrity": "sha512-U7zgftmQriw37tfD934UNInokz6yTmn29inT2cAetAsaU9YeVCveWEwhKL1Mg4yS7q//NGdzy79nlXh3bT8Kjw==", + "dev": true + }, + "@webassemblyjs/wasm-edit": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.8.5.tgz", + "integrity": "sha512-A41EMy8MWw5yvqj7MQzkDjU29K7UJq1VrX2vWLzfpRHt3ISftOXqrtojn7nlPsZ9Ijhp5NwuODuycSvfAO/26Q==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-buffer": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/helper-wasm-section": "1.8.5", + 
"@webassemblyjs/wasm-gen": "1.8.5", + "@webassemblyjs/wasm-opt": "1.8.5", + "@webassemblyjs/wasm-parser": "1.8.5", + "@webassemblyjs/wast-printer": "1.8.5" + } + }, + "@webassemblyjs/wasm-gen": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.8.5.tgz", + "integrity": "sha512-BCZBT0LURC0CXDzj5FXSc2FPTsxwp3nWcqXQdOZE4U7h7i8FqtFK5Egia6f9raQLpEKT1VL7zr4r3+QX6zArWg==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/ieee754": "1.8.5", + "@webassemblyjs/leb128": "1.8.5", + "@webassemblyjs/utf8": "1.8.5" + } + }, + "@webassemblyjs/wasm-opt": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.8.5.tgz", + "integrity": "sha512-HKo2mO/Uh9A6ojzu7cjslGaHaUU14LdLbGEKqTR7PBKwT6LdPtLLh9fPY33rmr5wcOMrsWDbbdCHq4hQUdd37Q==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-buffer": "1.8.5", + "@webassemblyjs/wasm-gen": "1.8.5", + "@webassemblyjs/wasm-parser": "1.8.5" + } + }, + "@webassemblyjs/wasm-parser": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.8.5.tgz", + "integrity": "sha512-pi0SYE9T6tfcMkthwcgCpL0cM9nRYr6/6fjgDtL6q/ZqKHdMWvxitRi5JcZ7RI4SNJJYnYNaWy5UUrHQy998lw==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-api-error": "1.8.5", + "@webassemblyjs/helper-wasm-bytecode": "1.8.5", + "@webassemblyjs/ieee754": "1.8.5", + "@webassemblyjs/leb128": "1.8.5", + "@webassemblyjs/utf8": "1.8.5" + } + }, + "@webassemblyjs/wast-parser": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.8.5.tgz", + "integrity": "sha512-daXC1FyKWHF1i11obK086QRlsMsY4+tIOKgBqI1lxAnkp9xe9YMcgOxm9kLe+ttjs5aWV2KKE1TWJCN57/Btsg==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + 
"@webassemblyjs/floating-point-hex-parser": "1.8.5", + "@webassemblyjs/helper-api-error": "1.8.5", + "@webassemblyjs/helper-code-frame": "1.8.5", + "@webassemblyjs/helper-fsm": "1.8.5", + "@xtuc/long": "4.2.2" + } + }, + "@webassemblyjs/wast-printer": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.8.5.tgz", + "integrity": "sha512-w0U0pD4EhlnvRyeJzBqaVSJAo9w/ce7/WPogeXLzGkO6hzhr4GnQIZ4W4uUt5b9ooAaXPtnXlj0gzsXEOUNYMg==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/wast-parser": "1.8.5", + "@xtuc/long": "4.2.2" + } + }, + "@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "dev": true + }, + "@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "dev": true + }, + "acorn": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.1.tgz", + "integrity": "sha512-ZVA9k326Nwrj3Cj9jlh3wGFutC2ZornPNARZwsNYqQYgN0EsV2d53w5RN/co65Ohn4sUAUtb1rSUAOD6XN9idA==", + "dev": true + }, + "acorn-jsx": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", + "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==", + "dev": true + }, + "aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dev": true, + "requires": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + } + }, + "ajv": { + "version": "6.12.6", + "resolved": 
"https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ajv-errors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz", + "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==", + "dev": true + }, + "ajv-keywords": { + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.4.1.tgz", + "integrity": "sha512-RO1ibKvd27e6FEShVFfPALuHI3WjSVNeK5FIsmme/LYRNxjKuNj+Dt7bucLa6NdSv3JcVTyMlm9kGR84z1XpaQ==", + "dev": true + }, + "ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true + }, + "ansi-escapes": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", + "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", + "dev": true + }, + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "dev": true + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } + }, + "anymatch": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz", + "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==", + "dev": true, + "requires": { + "micromatch": "^3.1.4", + "normalize-path": "^2.1.1" + }, + "dependencies": { + "normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "dev": true, + "requires": { + "remove-trailing-separator": "^1.0.1" + } + } + } + }, + "aproba": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", + "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==", + "dev": true + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "arr-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=", + "dev": true + }, + "arr-flatten": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", + "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", + "dev": true + }, + "arr-union": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", + "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=", + "dev": true + }, + "array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true + }, + "array-unique": { + "version": 
"0.3.2", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=", + "dev": true + }, + "arrify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz", + "integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==", + "dev": true + }, + "asn1.js": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", + "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "dev": true, + "requires": { + "bn.js": "^4.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "assert": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz", + "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==", + "dev": true, + "requires": { + "object-assign": "^4.1.1", + "util": "0.10.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz", + "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE=", + "dev": true + }, + "util": { + "version": "0.10.3", + "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz", + "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=", + "dev": true, + "requires": { + "inherits": "2.0.1" + } + } + } + }, + "assign-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", + "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=", + "dev": true + }, + "astral-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", + "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", + "dev": true + }, + "async-each": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz", + "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==", + "dev": true + }, + "atob": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", + "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", + "dev": true + }, + "babel-code-frame": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz", + "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=", + "dev": true, + "requires": { + "chalk": "^1.1.3", + "esutils": "^2.0.2", + "js-tokens": "^3.0.2" + }, + "dependencies": { + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true, + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true + } + } + }, + "babel-core": { + "version": "6.26.3", + "resolved": 
"https://registry.npmjs.org/babel-core/-/babel-core-6.26.3.tgz", + "integrity": "sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==", + "dev": true, + "requires": { + "babel-code-frame": "^6.26.0", + "babel-generator": "^6.26.0", + "babel-helpers": "^6.24.1", + "babel-messages": "^6.23.0", + "babel-register": "^6.26.0", + "babel-runtime": "^6.26.0", + "babel-template": "^6.26.0", + "babel-traverse": "^6.26.0", + "babel-types": "^6.26.0", + "babylon": "^6.18.0", + "convert-source-map": "^1.5.1", + "debug": "^2.6.9", + "json5": "^0.5.1", + "lodash": "^4.17.4", + "minimatch": "^3.0.4", + "path-is-absolute": "^1.0.1", + "private": "^0.1.8", + "slash": "^1.0.0", + "source-map": "^0.5.7" + }, + "dependencies": { + "json5": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", + "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=", + "dev": true + } + } + }, + "babel-generator": { + "version": "6.26.1", + "resolved": "https://registry.npmjs.org/babel-generator/-/babel-generator-6.26.1.tgz", + "integrity": "sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA==", + "dev": true, + "requires": { + "babel-messages": "^6.23.0", + "babel-runtime": "^6.26.0", + "babel-types": "^6.26.0", + "detect-indent": "^4.0.0", + "jsesc": "^1.3.0", + "lodash": "^4.17.4", + "source-map": "^0.5.7", + "trim-right": "^1.0.1" + } + }, + "babel-helper-builder-binary-assignment-operator-visitor": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz", + "integrity": "sha1-zORReto1b0IgvK6KAsKzRvmlZmQ=", + "dev": true, + "requires": { + "babel-helper-explode-assignable-expression": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-helper-call-delegate": { + "version": "6.24.1", + "resolved": 
"https://registry.npmjs.org/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz", + "integrity": "sha1-7Oaqzdx25Bw0YfiL/Fdb0Nqi340=", + "dev": true, + "requires": { + "babel-helper-hoist-variables": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helper-define-map": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-helper-define-map/-/babel-helper-define-map-6.26.0.tgz", + "integrity": "sha1-pfVtq0GiX5fstJjH66ypgZ+Vvl8=", + "dev": true, + "requires": { + "babel-helper-function-name": "^6.24.1", + "babel-runtime": "^6.26.0", + "babel-types": "^6.26.0", + "lodash": "^4.17.4" + } + }, + "babel-helper-explode-assignable-expression": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz", + "integrity": "sha1-8luCz33BBDPFX3BZLVdGQArCLKo=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helper-function-name": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz", + "integrity": "sha1-00dbjAPtmCQqJbSDUasYOZ01gKk=", + "dev": true, + "requires": { + "babel-helper-get-function-arity": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helper-get-function-arity": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz", + "integrity": "sha1-j3eCqpNAfEHTqlCQj4mwMbG2hT0=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-helper-hoist-variables": { + "version": "6.24.1", + "resolved": 
"https://registry.npmjs.org/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz", + "integrity": "sha1-HssnaJydJVE+rbyZFKc/VAi+enY=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-helper-optimise-call-expression": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz", + "integrity": "sha1-96E0J7qfc/j0+pk8VKl4gtEkQlc=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-helper-regex": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-helper-regex/-/babel-helper-regex-6.26.0.tgz", + "integrity": "sha1-MlxZ+QL4LyS3T6zu0DY5VPZJXnI=", + "dev": true, + "requires": { + "babel-runtime": "^6.26.0", + "babel-types": "^6.26.0", + "lodash": "^4.17.4" + } + }, + "babel-helper-remap-async-to-generator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz", + "integrity": "sha1-XsWBgnrXI/7N04HxySg5BnbkVRs=", + "dev": true, + "requires": { + "babel-helper-function-name": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helper-replace-supers": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz", + "integrity": "sha1-v22/5Dk40XNpohPKiov3S2qQqxo=", + "dev": true, + "requires": { + "babel-helper-optimise-call-expression": "^6.24.1", + "babel-messages": "^6.23.0", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-helpers": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-helpers/-/babel-helpers-6.24.1.tgz", + "integrity": 
"sha1-NHHenK7DiOXIUOWX5Yom3fN2ArI=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-loader": { + "version": "7.1.5", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-7.1.5.tgz", + "integrity": "sha512-iCHfbieL5d1LfOQeeVJEUyD9rTwBcP/fcEbRCfempxTDuqrKpu0AZjLAQHEQa3Yqyj9ORKe2iHfoj4rHLf7xpw==", + "dev": true, + "requires": { + "find-cache-dir": "^1.0.0", + "loader-utils": "^1.0.2", + "mkdirp": "^0.5.1" + }, + "dependencies": { + "find-cache-dir": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-1.0.0.tgz", + "integrity": "sha1-kojj6ePMN0hxfTnq3hfPcfww7m8=", + "dev": true, + "requires": { + "commondir": "^1.0.1", + "make-dir": "^1.0.0", + "pkg-dir": "^2.0.0" + } + }, + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "dev": true, + "requires": { + "locate-path": "^2.0.0" + } + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "dev": true, + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "make-dir": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", + "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "dev": true, + "requires": { + "pify": "^3.0.0" + } + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": 
"sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "dev": true, + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", + "dev": true + }, + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=", + "dev": true + }, + "pkg-dir": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", + "integrity": "sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=", + "dev": true, + "requires": { + "find-up": "^2.1.0" + } + } + } + }, + "babel-messages": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-messages/-/babel-messages-6.23.0.tgz", + "integrity": "sha1-8830cDhYA1sqKVHG7F7fbGLyYw4=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-check-es2015-constants": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz", + "integrity": "sha1-NRV7EBQm/S/9PaP3XH0ekYNbv4o=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-syntax-async-functions": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz", + "integrity": "sha1-ytnK0RkbWtY0vzCuCHI5HgZHvpU=", + "dev": true + }, + "babel-plugin-syntax-exponentiation-operator": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz", + "integrity": "sha1-nufoM3KQ2pUoggGmpX9BcDF4MN4=", + "dev": true + }, + "babel-plugin-syntax-trailing-function-commas": { + "version": "6.22.0", + "resolved": 
"https://registry.npmjs.org/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz", + "integrity": "sha1-ugNgk3+NBuQBgKQ/4NVhb/9TLPM=", + "dev": true + }, + "babel-plugin-transform-async-to-generator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz", + "integrity": "sha1-ZTbjeK/2yx1VF6wOQOs+n8jQh2E=", + "dev": true, + "requires": { + "babel-helper-remap-async-to-generator": "^6.24.1", + "babel-plugin-syntax-async-functions": "^6.8.0", + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-arrow-functions": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz", + "integrity": "sha1-RSaSy3EdX3ncf4XkQM5BufJE0iE=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-block-scoped-functions": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz", + "integrity": "sha1-u8UbSflk1wy42OC5ToICRs46YUE=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-block-scoping": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.26.0.tgz", + "integrity": "sha1-1w9SmcEwjQXBL0Y4E7CgnnOxiV8=", + "dev": true, + "requires": { + "babel-runtime": "^6.26.0", + "babel-template": "^6.26.0", + "babel-traverse": "^6.26.0", + "babel-types": "^6.26.0", + "lodash": "^4.17.4" + } + }, + "babel-plugin-transform-es2015-classes": { + "version": "6.24.1", + "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz", + "integrity": "sha1-WkxYpQyclGHlZLSyo7+ryXolhNs=", + "dev": true, + "requires": { + "babel-helper-define-map": "^6.24.1", + "babel-helper-function-name": "^6.24.1", + "babel-helper-optimise-call-expression": "^6.24.1", + "babel-helper-replace-supers": "^6.24.1", + "babel-messages": "^6.23.0", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-computed-properties": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz", + "integrity": "sha1-b+Ko0WiV1WNPTNmZttNICjCBWbM=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-destructuring": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz", + "integrity": "sha1-mXux8auWf2gtKwh2/jWNYOdlxW0=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-duplicate-keys": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz", + "integrity": "sha1-c+s9MQypaePvnskcU3QabxV2Qj4=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-for-of": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz", + "integrity": "sha1-9HyVsrYT3x0+zC/bdXNiPHUkhpE=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + 
"babel-plugin-transform-es2015-function-name": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz", + "integrity": "sha1-g0yJhTvDaxrw86TF26qU/Y6sqos=", + "dev": true, + "requires": { + "babel-helper-function-name": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-literals": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz", + "integrity": "sha1-T1SgLWzWbPkVKAAZox0xklN3yi4=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-modules-amd": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz", + "integrity": "sha1-Oz5UAXI5hC1tGcMBHEvS8AoA0VQ=", + "dev": true, + "requires": { + "babel-plugin-transform-es2015-modules-commonjs": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-modules-commonjs": { + "version": "6.26.2", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz", + "integrity": "sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q==", + "dev": true, + "requires": { + "babel-plugin-transform-strict-mode": "^6.24.1", + "babel-runtime": "^6.26.0", + "babel-template": "^6.26.0", + "babel-types": "^6.26.0" + } + }, + "babel-plugin-transform-es2015-modules-systemjs": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz", + "integrity": "sha1-/4mhQrkRmpBhlfXxBuzzBdlAfSM=", + "dev": true, + "requires": { + 
"babel-helper-hoist-variables": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-modules-umd": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz", + "integrity": "sha1-rJl+YoXNGO1hdq22B9YCNErThGg=", + "dev": true, + "requires": { + "babel-plugin-transform-es2015-modules-amd": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-object-super": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz", + "integrity": "sha1-JM72muIcuDp/hgPa0CH1cusnj40=", + "dev": true, + "requires": { + "babel-helper-replace-supers": "^6.24.1", + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-parameters": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz", + "integrity": "sha1-V6w1GrScrxSpfNE7CfZv3wpiXys=", + "dev": true, + "requires": { + "babel-helper-call-delegate": "^6.24.1", + "babel-helper-get-function-arity": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-template": "^6.24.1", + "babel-traverse": "^6.24.1", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-shorthand-properties": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz", + "integrity": "sha1-JPh11nIch2YbvZmkYi5R8U3jiqA=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-spread": { + "version": "6.22.0", + "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz", + "integrity": "sha1-1taKmfia7cRTbIGlQujdnxdG+NE=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-sticky-regex": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz", + "integrity": "sha1-AMHNsaynERLN8M9hJsLta0V8zbw=", + "dev": true, + "requires": { + "babel-helper-regex": "^6.24.1", + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-plugin-transform-es2015-template-literals": { + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz", + "integrity": "sha1-qEs0UPfp+PH2g51taH2oS7EjbY0=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-typeof-symbol": { + "version": "6.23.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz", + "integrity": "sha1-3sCfHN3/lLUqxz1QXITfWdzOs3I=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-es2015-unicode-regex": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz", + "integrity": "sha1-04sS9C6nMj9yk4fxinxa4frrNek=", + "dev": true, + "requires": { + "babel-helper-regex": "^6.24.1", + "babel-runtime": "^6.22.0", + "regexpu-core": "^2.0.0" + } + }, + "babel-plugin-transform-exponentiation-operator": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz", + "integrity": 
"sha1-KrDJx/MJj6SJB3cruBP+QejeOg4=", + "dev": true, + "requires": { + "babel-helper-builder-binary-assignment-operator-visitor": "^6.24.1", + "babel-plugin-syntax-exponentiation-operator": "^6.8.0", + "babel-runtime": "^6.22.0" + } + }, + "babel-plugin-transform-regenerator": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.26.0.tgz", + "integrity": "sha1-4HA2lvveJ/Cj78rPi03KL3s6jy8=", + "dev": true, + "requires": { + "regenerator-transform": "^0.10.0" + } + }, + "babel-plugin-transform-strict-mode": { + "version": "6.24.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz", + "integrity": "sha1-1fr3qleKZbvlkc9e2uBKDGcCB1g=", + "dev": true, + "requires": { + "babel-runtime": "^6.22.0", + "babel-types": "^6.24.1" + } + }, + "babel-preset-env": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/babel-preset-env/-/babel-preset-env-1.7.0.tgz", + "integrity": "sha512-9OR2afuKDneX2/q2EurSftUYM0xGu4O2D9adAhVfADDhrYDaxXV0rBbevVYoY9n6nyX1PmQW/0jtpJvUNr9CHg==", + "dev": true, + "requires": { + "babel-plugin-check-es2015-constants": "^6.22.0", + "babel-plugin-syntax-trailing-function-commas": "^6.22.0", + "babel-plugin-transform-async-to-generator": "^6.22.0", + "babel-plugin-transform-es2015-arrow-functions": "^6.22.0", + "babel-plugin-transform-es2015-block-scoped-functions": "^6.22.0", + "babel-plugin-transform-es2015-block-scoping": "^6.23.0", + "babel-plugin-transform-es2015-classes": "^6.23.0", + "babel-plugin-transform-es2015-computed-properties": "^6.22.0", + "babel-plugin-transform-es2015-destructuring": "^6.23.0", + "babel-plugin-transform-es2015-duplicate-keys": "^6.22.0", + "babel-plugin-transform-es2015-for-of": "^6.23.0", + "babel-plugin-transform-es2015-function-name": "^6.22.0", + "babel-plugin-transform-es2015-literals": "^6.22.0", + "babel-plugin-transform-es2015-modules-amd": 
"^6.22.0", + "babel-plugin-transform-es2015-modules-commonjs": "^6.23.0", + "babel-plugin-transform-es2015-modules-systemjs": "^6.23.0", + "babel-plugin-transform-es2015-modules-umd": "^6.23.0", + "babel-plugin-transform-es2015-object-super": "^6.22.0", + "babel-plugin-transform-es2015-parameters": "^6.23.0", + "babel-plugin-transform-es2015-shorthand-properties": "^6.22.0", + "babel-plugin-transform-es2015-spread": "^6.22.0", + "babel-plugin-transform-es2015-sticky-regex": "^6.22.0", + "babel-plugin-transform-es2015-template-literals": "^6.22.0", + "babel-plugin-transform-es2015-typeof-symbol": "^6.23.0", + "babel-plugin-transform-es2015-unicode-regex": "^6.22.0", + "babel-plugin-transform-exponentiation-operator": "^6.22.0", + "babel-plugin-transform-regenerator": "^6.22.0", + "browserslist": "^3.2.6", + "invariant": "^2.2.2", + "semver": "^5.3.0" + } + }, + "babel-register": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-register/-/babel-register-6.26.0.tgz", + "integrity": "sha1-btAhFz4vy0htestFxgCahW9kcHE=", + "dev": true, + "requires": { + "babel-core": "^6.26.0", + "babel-runtime": "^6.26.0", + "core-js": "^2.5.0", + "home-or-tmp": "^2.0.0", + "lodash": "^4.17.4", + "mkdirp": "^0.5.1", + "source-map-support": "^0.4.15" + }, + "dependencies": { + "source-map-support": { + "version": "0.4.18", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.4.18.tgz", + "integrity": "sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==", + "dev": true, + "requires": { + "source-map": "^0.5.6" + } + } + } + }, + "babel-runtime": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", + "integrity": "sha1-llxwWGaOgrVde/4E/yM3vItWR/4=", + "dev": true, + "requires": { + "core-js": "^2.4.0", + "regenerator-runtime": "^0.11.0" + } + }, + "babel-template": { + "version": "6.26.0", + "resolved": 
"https://registry.npmjs.org/babel-template/-/babel-template-6.26.0.tgz", + "integrity": "sha1-3gPi0WOWsGn0bdn/+FIfsaDjXgI=", + "dev": true, + "requires": { + "babel-runtime": "^6.26.0", + "babel-traverse": "^6.26.0", + "babel-types": "^6.26.0", + "babylon": "^6.18.0", + "lodash": "^4.17.4" + } + }, + "babel-traverse": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-traverse/-/babel-traverse-6.26.0.tgz", + "integrity": "sha1-RqnL1+3MYsjlwGTi0tjQ9ANXZu4=", + "dev": true, + "requires": { + "babel-code-frame": "^6.26.0", + "babel-messages": "^6.23.0", + "babel-runtime": "^6.26.0", + "babel-types": "^6.26.0", + "babylon": "^6.18.0", + "debug": "^2.6.8", + "globals": "^9.18.0", + "invariant": "^2.2.2", + "lodash": "^4.17.4" + } + }, + "babel-types": { + "version": "6.26.0", + "resolved": "https://registry.npmjs.org/babel-types/-/babel-types-6.26.0.tgz", + "integrity": "sha1-o7Bz+Uq0nrb6Vc1lInozQ4BjJJc=", + "dev": true, + "requires": { + "babel-runtime": "^6.26.0", + "esutils": "^2.0.2", + "lodash": "^4.17.4", + "to-fast-properties": "^1.0.3" + } + }, + "babylon": { + "version": "6.18.0", + "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", + "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "base": { + "version": "0.11.2", + "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", + "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "dev": true, + "requires": { + "cache-base": "^1.0.1", + "class-utils": "^0.3.5", + "component-emitter": "^1.2.1", + "define-property": "^1.0.0", + "isobject": "^3.0.1", + "mixin-deep": "^1.2.0", + "pascalcase": "^0.1.1" + }, + "dependencies": { + 
"define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "base64-js": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", + "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==", + "dev": true + }, + "big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "dev": true + }, + "binary-extensions": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz", + "integrity": 
"sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==", + "dev": true + }, + "bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "dev": true, + "optional": true, + "requires": { + "file-uri-to-path": "1.0.0" + } + }, + "bluebird": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", + "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", + "dev": true + }, + "bn.js": { + "version": "4.11.8", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz", + "integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA==", + "dev": true + }, + "boolify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/boolify/-/boolify-1.0.1.tgz", + "integrity": "sha1-tcCeF8rNET0Rt7s+04TMASmU2Gs=", + "dev": true + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", + "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "dev": true, + "requires": { + "arr-flatten": "^1.1.0", + "array-unique": "^0.3.2", + "extend-shallow": "^2.0.1", + "fill-range": "^4.0.0", + "isobject": "^3.0.1", + "repeat-element": "^1.1.2", + "snapdragon": "^0.8.1", + "snapdragon-node": "^2.0.1", + "split-string": "^3.0.2", + "to-regex": "^3.0.1" + }, + "dependencies": { + 
"extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "brorand": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", + "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=", + "dev": true + }, + "browserify-aes": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", + "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", + "dev": true, + "requires": { + "buffer-xor": "^1.0.3", + "cipher-base": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.3", + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "browserify-cipher": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz", + "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==", + "dev": true, + "requires": { + "browserify-aes": "^1.0.4", + "browserify-des": "^1.0.0", + "evp_bytestokey": "^1.0.0" + } + }, + "browserify-des": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz", + "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==", + "dev": true, + "requires": { + "cipher-base": "^1.0.1", + "des.js": "^1.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "browserify-rsa": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz", + "integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "randombytes": "^2.0.1" + } + }, + "browserify-sign": { + "version": "4.0.4", + "resolved": 
"https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.0.4.tgz", + "integrity": "sha1-qk62jl17ZYuqa/alfmMMvXqT0pg=", + "dev": true, + "requires": { + "bn.js": "^4.1.1", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.2", + "elliptic": "^6.0.0", + "inherits": "^2.0.1", + "parse-asn1": "^5.0.0" + } + }, + "browserify-zlib": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", + "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "dev": true, + "requires": { + "pako": "~1.0.5" + } + }, + "browserslist": { + "version": "3.2.8", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-3.2.8.tgz", + "integrity": "sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==", + "dev": true, + "requires": { + "caniuse-lite": "^1.0.30000844", + "electron-to-chromium": "^1.3.47" + } + }, + "buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "dev": true, + "requires": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", + "dev": true + }, + "buffer-xor": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", + "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=", + "dev": true + }, + "builtin-status-codes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz", + "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug=", + "dev": true + }, + 
"cacache": { + "version": "12.0.4", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz", + "integrity": "sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==", + "dev": true, + "requires": { + "bluebird": "^3.5.5", + "chownr": "^1.1.1", + "figgy-pudding": "^3.5.1", + "glob": "^7.1.4", + "graceful-fs": "^4.1.15", + "infer-owner": "^1.0.3", + "lru-cache": "^5.1.1", + "mississippi": "^3.0.0", + "mkdirp": "^0.5.1", + "move-concurrently": "^1.0.1", + "promise-inflight": "^1.0.1", + "rimraf": "^2.6.3", + "ssri": "^6.0.1", + "unique-filename": "^1.1.1", + "y18n": "^4.0.0" + } + }, + "cache-base": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", + "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "dev": true, + "requires": { + "collection-visit": "^1.0.0", + "component-emitter": "^1.2.1", + "get-value": "^2.0.6", + "has-value": "^1.0.0", + "isobject": "^3.0.1", + "set-value": "^2.0.0", + "to-object-path": "^0.3.0", + "union-value": "^1.0.0", + "unset-value": "^1.0.0" + } + }, + "callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true + }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "camelcase-keys": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz", + "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==", + "dev": true, + "requires": { + "camelcase": "^5.3.1", + "map-obj": "^4.0.0", + "quick-lru": 
"^4.0.1" + } + }, + "caniuse-lite": { + "version": "1.0.30001035", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001035.tgz", + "integrity": "sha512-C1ZxgkuA4/bUEdMbU5WrGY4+UhMFFiXrgNAfxiMIqWgFTWfv/xsZCS2xEHT2LMq7xAZfuAnu6mcqyDl0ZR6wLQ==", + "dev": true + }, + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "dependencies": { + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + } + } + }, + "chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", + "dev": true + }, + "chokidar": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz", + "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==", + "dev": true, + "requires": { + "anymatch": "^2.0.0", + "async-each": "^1.0.1", + "braces": "^2.3.2", + "fsevents": "^1.2.7", + "glob-parent": "^3.1.0", + "inherits": "^2.0.3", + "is-binary-path": "^1.0.0", + "is-glob": "^4.0.0", + "normalize-path": "^3.0.0", + "path-is-absolute": "^1.0.0", + "readdirp": "^2.2.1", + "upath": "^1.1.1" + } + }, + "chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": 
"sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "dev": true + }, + "chrome-trace-event": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.2.tgz", + "integrity": "sha512-9e/zx1jw7B4CO+c/RXoCsfg/x1AfUBioy4owYH0bJprEYAx5hRFLRhWBqHAG57D0ZM4H7vxbP7bPe0VwhQRYDQ==", + "dev": true, + "requires": { + "tslib": "^1.9.0" + } + }, + "ci-info": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", + "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==", + "dev": true + }, + "cipher-base": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", + "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "class-utils": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", + "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "dev": true, + "requires": { + "arr-union": "^3.1.0", + "define-property": "^0.2.5", + "isobject": "^3.0.0", + "static-extend": "^0.1.1" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + } + } + }, + "clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true + }, + "cli-cursor": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", + "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", + "dev": true, + "requires": { + "restore-cursor": "^2.0.0" + } + }, + "cli-truncate": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", + "integrity": "sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg==", + "dev": true, + "requires": { + "slice-ansi": "^3.0.0", + "string-width": "^4.2.0" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "dev": true + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true + }, + "slice-ansi": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-3.0.0.tgz", + "integrity": "sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + } + }, + "string-width": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", + "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + } + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + } + } + }, + "cli-width": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.1.tgz", + "integrity": "sha512-GRMWDxpOB6Dgk2E5Uo+3eEBvtOOlimMmpbFiKuLFnQzYDavtLFY3K5ona41jgN/WdRZtG7utuVSVTL4HbZHGkw==", + "dev": true + }, + "cliui": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", + 
"dev": true, + "requires": { + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" + } + }, + "collection-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", + "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", + "dev": true, + "requires": { + "map-visit": "^1.0.0", + "object-visit": "^1.0.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", + "dev": true + }, + "commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true + }, + "common-tags": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/common-tags/-/common-tags-1.8.0.tgz", + "integrity": "sha512-6P6g0uetGpW/sdyUy/iQQCbFF0kWVMSIVSyYz7Zgjcgh8mgw8PQzDNZeyZ5DQ2gM7LBoZPHmnjz8rUthkBG5tw==", + "dev": true + }, + "commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=", + "dev": true + }, + "compare-versions": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/compare-versions/-/compare-versions-3.6.0.tgz", + "integrity": "sha512-W6Af2Iw1z4CB7q4uU4hv646dW9GQuBM+YpC0UvUCWSD8w90SJjp+ujJuXaEMtAXBtSqGfMPuFOVn4/+FlaqfBA==", + "dev": true + }, + "component-emitter": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz", + 
"integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "console-browserify": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz", + "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==", + "dev": true + }, + "constants-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz", + "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U=", + "dev": true + }, + "convert-source-map": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", + "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.1" + } + }, + "copy-concurrently": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz", + "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==", + "dev": true, + "requires": { + "aproba": "^1.1.1", + "fs-write-stream-atomic": "^1.0.8", + "iferr": "^0.1.5", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.0" + } + }, + "copy-descriptor": { + 
"version": "0.1.1", + "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", + "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=", + "dev": true + }, + "core-js": { + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.11.tgz", + "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==", + "dev": true + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "cosmiconfig": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.0.tgz", + "integrity": "sha512-pondGvTuVYDk++upghXJabWzL6Kxu6f26ljFw64Swq9v6sQPUL3EUlVDV56diOjpCayKihL6hVe8exIACU4XcA==", + "dev": true, + "requires": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + } + }, + "create-ecdh": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz", + "integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "elliptic": "^6.0.0" + } + }, + "create-hash": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", + "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", + "dev": true, + "requires": { + "cipher-base": "^1.0.1", + "inherits": "^2.0.1", + "md5.js": "^1.3.4", + "ripemd160": "^2.0.1", + "sha.js": "^2.4.0" + } + }, + "create-hmac": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", + "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", + "dev": true, + 
"requires": { + "cipher-base": "^1.0.3", + "create-hash": "^1.1.0", + "inherits": "^2.0.1", + "ripemd160": "^2.0.0", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dev": true, + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "crypto-browserify": { + "version": "3.12.0", + "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz", + "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==", + "dev": true, + "requires": { + "browserify-cipher": "^1.0.0", + "browserify-sign": "^4.0.0", + "create-ecdh": "^4.0.0", + "create-hash": "^1.1.0", + "create-hmac": "^1.1.0", + "diffie-hellman": "^5.0.0", + "inherits": "^2.0.1", + "pbkdf2": "^3.0.3", + "public-encrypt": "^4.0.0", + "randombytes": "^2.0.0", + "randomfill": "^1.0.3" + } + }, + "css-loader": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-3.4.2.tgz", + "integrity": "sha512-jYq4zdZT0oS0Iykt+fqnzVLRIeiPWhka+7BqPn+oSIpWJAHak5tmB/WZrJ2a21JhCeFyNnnlroSl8c+MtVndzA==", + "dev": true, + "requires": { + "camelcase": "^5.3.1", + "cssesc": "^3.0.0", + "icss-utils": "^4.1.1", + "loader-utils": "^1.2.3", + "normalize-path": "^3.0.0", + "postcss": "^7.0.23", + "postcss-modules-extract-imports": "^2.0.0", + "postcss-modules-local-by-default": "^3.0.2", + "postcss-modules-scope": "^2.1.1", + "postcss-modules-values": "^3.0.0", + "postcss-value-parser": "^4.0.2", + "schema-utils": "^2.6.0" + }, + "dependencies": { + "schema-utils": { + "version": "2.6.5", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.6.5.tgz", + "integrity": 
"sha512-5KXuwKziQrTVHh8j/Uxz+QUbxkaLW9X/86NBlx/gnKgtsZA2GIVMUn17qWhRFwF8jdYb3Dig5hRO/W5mZqy6SQ==", + "dev": true, + "requires": { + "ajv": "^6.12.0", + "ajv-keywords": "^3.4.1" + } + } + } + }, + "cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true + }, + "cyclist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz", + "integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk=", + "dev": true + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true + }, + "decode-uri-component": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", + "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=", + "dev": true + }, + "dedent": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-0.7.0.tgz", + "integrity": "sha1-JJXduvbrh0q7Dhvp3yLS5aVEMmw=", + "dev": true + }, + "deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", + "dev": true + }, + "define-property": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", + "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "dev": true, + "requires": { + "is-descriptor": "^1.0.2", + "isobject": "^3.0.1" + }, + 
"dependencies": { + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "des.js": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz", + "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0" + } + }, + "detect-file": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/detect-file/-/detect-file-1.0.0.tgz", + "integrity": "sha1-8NZtA2cqglyxtzvbP+YjEMjlUrc=", + "dev": true + }, + "detect-indent": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-4.0.0.tgz", + "integrity": "sha1-920GQ1LN9Docts5hnE7jqUdd4gg=", + "dev": true, + "requires": { + "repeating": "^2.0.0" + } + }, + "diffie-hellman": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", + "integrity": 
"sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "miller-rabin": "^4.0.0", + "randombytes": "^2.0.0" + } + }, + "dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "requires": { + "path-type": "^4.0.0" + } + }, + "dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true + }, + "doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "requires": { + "esutils": "^2.0.2" + } + }, + "domain-browser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz", + "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==", + "dev": true + }, + "duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "dev": true, + "requires": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "electron-to-chromium": { + "version": "1.3.380", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.380.tgz", + "integrity": "sha512-2jhQxJKcjcSpVOQm0NAfuLq8o+130blrcawoumdXT6411xG/xIAOyZodO/y7WTaYlz/NHe3sCCAe/cJLnDsqTw==", + "dev": true + }, + "elliptic": { + "version": "6.5.3", + 
"resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz", + "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==", + "dev": true, + "requires": { + "bn.js": "^4.4.0", + "brorand": "^1.0.1", + "hash.js": "^1.0.0", + "hmac-drbg": "^1.0.0", + "inherits": "^2.0.1", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.0" + } + }, + "emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", + "dev": true + }, + "emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "dev": true + }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dev": true, + "requires": { + "once": "^1.4.0" + } + }, + "enhanced-resolve": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz", + "integrity": "sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "memory-fs": "^0.5.0", + "tapable": "^1.0.0" + }, + "dependencies": { + "memory-fs": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz", + "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==", + "dev": true, + "requires": { + "errno": "^0.1.3", + "readable-stream": "^2.0.1" + } + } + } + }, + "enquirer": { + "version": "2.3.6", + "resolved": 
"https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz", + "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==", + "dev": true, + "requires": { + "ansi-colors": "^4.1.1" + } + }, + "errno": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.7.tgz", + "integrity": "sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg==", + "dev": true, + "requires": { + "prr": "~1.0.1" + } + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "eslint": { + "version": "7.14.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.14.0.tgz", + "integrity": "sha512-5YubdnPXrlrYAFCKybPuHIAH++PINe1pmKNc5wQRB9HSbqIK1ywAnntE3Wwua4giKu0bjligf1gLF6qxMGOYRA==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "@eslint/eslintrc": "^0.2.1", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "enquirer": "^2.3.5", + "eslint-scope": "^5.1.1", + "eslint-utils": "^2.1.0", + "eslint-visitor-keys": "^2.0.0", + "espree": "^7.3.0", + "esquery": "^1.2.0", + "esutils": "^2.0.2", + "file-entry-cache": "^5.0.1", + "functional-red-black-tree": "^1.0.1", + "glob-parent": "^5.0.0", + "globals": "^12.1.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "js-yaml": "^3.13.1", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash": "^4.17.19", + "minimatch": 
"^3.0.4", + "natural-compare": "^1.4.0", + "optionator": "^0.9.1", + "progress": "^2.0.0", + "regexpp": "^3.1.0", + "semver": "^7.2.1", + "strip-ansi": "^6.0.0", + "strip-json-comments": "^3.1.0", + "table": "^5.2.3", + "text-table": "^0.2.0", + "v8-compile-cache": "^2.0.3" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "requires": { + 
"path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + } + }, + "eslint-visitor-keys": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.0.0.tgz", + "integrity": "sha512-QudtT6av5WXels9WjIM7qz1XD1cWGvX4gGXvp/zBn9nXG02D0utdU3Em2m/QjTnrsk6bBjmCygl3rmj118msQQ==", + "dev": true + }, + "glob-parent": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz", + "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "globals": { + "version": "12.4.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", + "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", + "dev": true, + "requires": { + "type-fest": "^0.8.1" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", 
+ "dev": true + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, + "semver": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", + "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", + "dev": true + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "eslint-scope": { + "version": 
"4.0.3", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz", + "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==", + "dev": true, + "requires": { + "esrecurse": "^4.1.0", + "estraverse": "^4.1.1" + } + }, + "eslint-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz", + "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==", + "dev": true, + "requires": { + "eslint-visitor-keys": "^1.1.0" + } + }, + "eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", + "dev": true + }, + "espree": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.0.tgz", + "integrity": "sha512-dksIWsvKCixn1yrEXO8UosNSxaDoSYpq9reEjZSbHLpT5hpaCAKTLBwq0RHtLrIr+c0ByiYzWT8KTMRzoRCNlw==", + "dev": true, + "requires": { + "acorn": "^7.4.0", + "acorn-jsx": "^5.2.0", + "eslint-visitor-keys": "^1.3.0" + }, + "dependencies": { + "acorn": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "dev": true + } + } + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true + }, + "esquery": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.3.1.tgz", + "integrity": "sha512-olpvt9QG0vniUBZspVRN6lwB7hOZoTRtT+jzR+tS4ffYx2mzbw+z0XCOk44aaLYKApNX5nMm+E+P6o25ip/DHQ==", + "dev": true, + "requires": { + "estraverse": 
"^5.1.0" + }, + "dependencies": { + "estraverse": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", + "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", + "dev": true + } + } + }, + "esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "requires": { + "estraverse": "^5.2.0" + }, + "dependencies": { + "estraverse": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", + "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==", + "dev": true + } + } + }, + "estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true + }, + "esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true + }, + "events": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.1.0.tgz", + "integrity": "sha512-Rv+u8MLHNOdMjTAFeT3nCjHn2aGlx435FP/sDHNaRhDEMwyI/aB22Kj2qIN8R0cw3z28psEQLYwxVKLsKrMgWg==", + "dev": true + }, + "evp_bytestokey": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", + "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", + "dev": true, + "requires": { + "md5.js": "^1.3.4", + "safe-buffer": "^5.1.1" + } + }, + "execa": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", + "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", + "dev": true, + "requires": { + "cross-spawn": "^6.0.0", + "get-stream": "^4.0.0", + "is-stream": "^1.1.0", + "npm-run-path": "^2.0.0", + "p-finally": "^1.0.0", + "signal-exit": "^3.0.0", + "strip-eof": "^1.0.0" + } + }, + "expand-brackets": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", + "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", + "dev": true, + "requires": { + "debug": "^2.3.3", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "posix-character-classes": "^0.1.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "expand-tilde": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/expand-tilde/-/expand-tilde-2.0.2.tgz", + "integrity": "sha1-l+gBqgUt8CRU3kawK/YhZCzchQI=", + "dev": true, + "requires": { + "homedir-polyfill": "^1.0.1" + } + }, + "extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", + "dev": true, + "requires": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "dependencies": { + "is-extendable": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dev": true, + "requires": { + "is-plain-object": "^2.0.4" + } + } + } + }, + "external-editor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", + "dev": true, + "requires": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + } + }, + "extglob": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", + "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "dev": true, + "requires": { + "array-unique": "^0.3.2", + "define-property": "^1.0.0", + "expand-brackets": "^2.1.4", + "extend-shallow": "^2.0.1", + "fragment-cache": "^0.2.1", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": 
"1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "fast-deep-equal": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz", + "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==", + "dev": true + }, + "fast-glob": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.4.tgz", + "integrity": "sha512-kr/Oo6PX51265qeuCYsyGypiO5uJFgBS0jksyG7FUeCyQzNwYnzrNIMR1NXfkZXsMYXYLRAHgISHBz8gQcxKHQ==", + "dev": true, + "requires": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.0", + "merge2": "^1.3.0", + "micromatch": "^4.0.2", + "picomatch": "^2.2.1" + }, + "dependencies": { + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "glob-parent": { + "version": 
"5.1.1", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz", + "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "micromatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", + "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", + "dev": true, + "requires": { + "braces": "^3.0.1", + "picomatch": "^2.0.5" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + } + } + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true + }, + "fastq": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.9.0.tgz", + "integrity": "sha512-i7FVWL8HhVY+CTkwFxkN2mk3h+787ixS5S63eb78diVRc1MCssarHq3W5cj0av7YDSwmaV928RNag+U1etRQ7w==", + "dev": true, + "requires": { + "reusify": "^1.0.4" + } + }, + "figgy-pudding": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz", + "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==", + "dev": true + }, + "figures": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", + "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5" + } + }, + "file-entry-cache": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", + "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", + "dev": true, + "requires": { + "flat-cache": "^2.0.1" + } + }, + "file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "dev": true, + "optional": true + }, + "fill-range": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", + "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", + "dev": true, + "requires": { + "extend-shallow": "^2.0.1", + "is-number": "^3.0.0", + "repeat-string": "^1.6.1", + "to-regex-range": "^2.1.0" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "find-cache-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", + "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "dev": true, + "requires": { + "commondir": "^1.0.1", + "make-dir": "^2.0.0", + "pkg-dir": "^3.0.0" + } + }, + 
"find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, + "find-versions": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/find-versions/-/find-versions-3.2.0.tgz", + "integrity": "sha512-P8WRou2S+oe222TOCHitLy8zj+SIsVJh52VP4lvXkaFVnOFFdoWv1H1Jjvel1aI6NCFOAaeAVm8qrI0odiLcww==", + "dev": true, + "requires": { + "semver-regex": "^2.0.0" + } + }, + "findup-sync": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-3.0.0.tgz", + "integrity": "sha512-YbffarhcicEhOrm4CtrwdKBdCuz576RLdhJDsIfvNtxUuhdRet1qZcsMjqbePtAseKdAnDyM/IyXbu7PRPRLYg==", + "dev": true, + "requires": { + "detect-file": "^1.0.0", + "is-glob": "^4.0.0", + "micromatch": "^3.0.4", + "resolve-dir": "^1.0.1" + } + }, + "flat-cache": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", + "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", + "dev": true, + "requires": { + "flatted": "^2.0.0", + "rimraf": "2.6.3", + "write": "1.0.3" + }, + "dependencies": { + "rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "dev": true, + "requires": { + "glob": "^7.1.3" + } + } + } + }, + "flatted": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", + "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", + "dev": true + }, + "flush-write-stream": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz", + 
"integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "readable-stream": "^2.3.6" + } + }, + "for-in": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", + "dev": true + }, + "fragment-cache": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", + "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", + "dev": true, + "requires": { + "map-cache": "^0.2.2" + } + }, + "from2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", + "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "fs-write-stream-atomic": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz", + "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "iferr": "^0.1.5", + "imurmurhash": "^0.1.4", + "readable-stream": "1 || 2" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "fsevents": { + "version": "1.2.12", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.12.tgz", + "integrity": "sha512-Ggd/Ktt7E7I8pxZRbGIs7vwqAPscSESMrCSkx2FtWeqmheJgCo2R74fTsZFCifr0VTPwqRpPv17+6b8Zp7th0Q==", + "dev": true, + "optional": true, + "requires": { + "bindings": "^1.5.0", + "nan": "^2.12.1", + "node-pre-gyp": "*" + }, + "dependencies": { + "abbrev": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "aproba": { 
+ "version": "1.2.0", + "bundled": true, + "dev": true, + "optional": true + }, + "are-we-there-yet": { + "version": "1.1.5", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "delegates": "^1.0.0", + "readable-stream": "^2.0.6" + } + }, + "balanced-match": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "chownr": { + "version": "1.1.4", + "bundled": true, + "dev": true, + "optional": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true, + "dev": true, + "optional": true + }, + "concat-map": { + "version": "0.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true, + "dev": true, + "optional": true + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "debug": { + "version": "3.2.6", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "ms": "^2.1.1" + } + }, + "deep-extend": { + "version": "0.6.0", + "bundled": true, + "dev": true, + "optional": true + }, + "delegates": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "detect-libc": { + "version": "1.0.3", + "bundled": true, + "dev": true, + "optional": true + }, + "fs-minipass": { + "version": "1.2.7", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "minipass": "^2.6.0" + } + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + "object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + 
"strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + } + }, + "glob": { + "version": "7.1.6", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "iconv-lite": { + "version": "0.4.24", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "ignore-walk": { + "version": "3.0.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "minimatch": "^3.0.4" + } + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "bundled": true, + "dev": true, + "optional": true + }, + "ini": { + "version": "1.3.5", + "bundled": true, + "dev": true, + "optional": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "isarray": { + "version": "1.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.5", + "bundled": true, + "dev": true, + "optional": true + }, + "minipass": { + "version": "2.9.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "safe-buffer": "^5.1.2", + "yallist": "^3.0.0" + } + }, + "minizlib": { + "version": "1.3.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "minipass": "^2.9.0" + } + }, + "mkdirp": { + "version": "0.5.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "minimist": "^1.2.5" + } + }, + 
"ms": { + "version": "2.1.2", + "bundled": true, + "dev": true, + "optional": true + }, + "needle": { + "version": "2.3.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "debug": "^3.2.6", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + } + }, + "node-pre-gyp": { + "version": "0.14.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "detect-libc": "^1.0.2", + "mkdirp": "^0.5.1", + "needle": "^2.2.1", + "nopt": "^4.0.1", + "npm-packlist": "^1.1.6", + "npmlog": "^4.0.2", + "rc": "^1.2.7", + "rimraf": "^2.6.1", + "semver": "^5.3.0", + "tar": "^4.4.2" + } + }, + "nopt": { + "version": "4.0.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "abbrev": "1", + "osenv": "^0.1.4" + } + }, + "npm-bundled": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npm-normalize-package-bin": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "npm-packlist": { + "version": "1.4.8", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "ignore-walk": "^3.0.1", + "npm-bundled": "^1.0.1", + "npm-normalize-package-bin": "^1.0.1" + } + }, + "npmlog": { + "version": "4.1.2", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true, + "dev": true, + "optional": true + }, + "once": { + "version": "1.4.0", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "wrappy": "1" + } + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "os-tmpdir": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": 
true + }, + "osenv": { + "version": "0.1.5", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "process-nextick-args": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "rc": { + "version": "1.2.8", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + } + }, + "readable-stream": { + "version": "2.3.7", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "rimraf": { + "version": "2.7.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "glob": "^7.1.3" + } + }, + "safe-buffer": { + "version": "5.1.2", + "bundled": true, + "dev": true, + "optional": true + }, + "safer-buffer": { + "version": "2.1.2", + "bundled": true, + "dev": true, + "optional": true + }, + "sax": { + "version": "1.2.4", + "bundled": true, + "dev": true, + "optional": true + }, + "semver": { + "version": "5.7.1", + "bundled": true, + "dev": true, + "optional": true + }, + "set-blocking": { + "version": "2.0.0", + "bundled": true, + "dev": true, + "optional": true + }, + "signal-exit": { + "version": "3.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "safe-buffer": "~5.1.0" + } + 
}, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true, + "dev": true, + "optional": true + }, + "tar": { + "version": "4.4.13", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "chownr": "^1.1.1", + "fs-minipass": "^1.2.5", + "minipass": "^2.8.6", + "minizlib": "^1.2.1", + "mkdirp": "^0.5.0", + "safe-buffer": "^5.1.2", + "yallist": "^3.0.3" + } + }, + "util-deprecate": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "wide-align": { + "version": "1.1.3", + "bundled": true, + "dev": true, + "optional": true, + "requires": { + "string-width": "^1.0.2 || 2" + } + }, + "wrappy": { + "version": "1.0.2", + "bundled": true, + "dev": true, + "optional": true + }, + "yallist": { + "version": "3.1.1", + "bundled": true, + "dev": true, + "optional": true + } + } + }, + "functional-red-black-tree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", + "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", + "dev": true + }, + "get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true + }, + "get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==", + "dev": true + }, + "get-stdin": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-7.0.0.tgz", + "integrity": 
"sha512-zRKcywvrXlXsA0v0i9Io4KDRaAw7+a1ZpjRwl9Wox8PFlVCCHra7E9c4kqXCoCM9nR5tBkaTTZRBoCm60bFqTQ==", + "dev": true + }, + "get-stream": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", + "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", + "dev": true, + "requires": { + "pump": "^3.0.0" + } + }, + "get-value": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", + "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=", + "dev": true + }, + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz", + "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=", + "dev": true, + "requires": { + "is-glob": "^3.1.0", + "path-dirname": "^1.0.0" + }, + "dependencies": { + "is-glob": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz", + "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=", + "dev": true, + "requires": { + "is-extglob": "^2.1.0" + } + } + } + }, + "global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "dev": true, + "requires": { + "global-prefix": "^3.0.0" + }, + "dependencies": { + "global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": 
"sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dev": true, + "requires": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" + } + } + } + }, + "global-prefix": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-1.0.2.tgz", + "integrity": "sha1-2/dDxsFJklk8ZVVoy2btMsASLr4=", + "dev": true, + "requires": { + "expand-tilde": "^2.0.2", + "homedir-polyfill": "^1.0.1", + "ini": "^1.3.4", + "is-windows": "^1.0.1", + "which": "^1.2.14" + } + }, + "globals": { + "version": "9.18.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", + "integrity": "sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==", + "dev": true + }, + "globby": { + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz", + "integrity": "sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ==", + "dev": true, + "requires": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.1.1", + "ignore": "^5.1.4", + "merge2": "^1.3.0", + "slash": "^3.0.0" + }, + "dependencies": { + "ignore": { + "version": "5.1.8", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", + "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", + "dev": true + }, + "slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true + } + } + }, + "graceful-fs": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", + "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==", + "dev": true + }, + "has-ansi": { + "version": "2.0.0", + 
"resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", + "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", + "dev": true, + "requires": { + "ansi-regex": "^2.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + } + } + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "dev": true + }, + "has-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", + "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", + "dev": true, + "requires": { + "get-value": "^2.0.6", + "has-values": "^1.0.0", + "isobject": "^3.0.0" + } + }, + "has-values": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", + "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", + "dev": true, + "requires": { + "is-number": "^3.0.0", + "kind-of": "^4.0.0" + }, + "dependencies": { + "kind-of": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", + "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "hash-base": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.0.4.tgz", + "integrity": "sha1-X8hoaEfs1zSZQDMZprCj8/auSRg=", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "hash.js": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", + "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "minimalistic-assert": "^1.0.1" + } + }, + "hmac-drbg": { + "version": "1.0.1", + 
"resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", + "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", + "dev": true, + "requires": { + "hash.js": "^1.0.3", + "minimalistic-assert": "^1.0.0", + "minimalistic-crypto-utils": "^1.0.1" + } + }, + "home-or-tmp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/home-or-tmp/-/home-or-tmp-2.0.0.tgz", + "integrity": "sha1-42w/LSyufXRqhX440Y1fMqeILbg=", + "dev": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.1" + } + }, + "homedir-polyfill": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz", + "integrity": "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==", + "dev": true, + "requires": { + "parse-passwd": "^1.0.0" + } + }, + "https-browserify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz", + "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=", + "dev": true + }, + "human-signals": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", + "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", + "dev": true + }, + "husky": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/husky/-/husky-4.3.0.tgz", + "integrity": "sha512-tTMeLCLqSBqnflBZnlVDhpaIMucSGaYyX6855jM4AguGeWCeSzNdb1mfyWduTZ3pe3SJVvVWGL0jO1iKZVPfTA==", + "dev": true, + "requires": { + "chalk": "^4.0.0", + "ci-info": "^2.0.0", + "compare-versions": "^3.6.0", + "cosmiconfig": "^7.0.0", + "find-versions": "^3.2.0", + "opencollective-postinstall": "^2.0.2", + "pkg-dir": "^4.2.0", + "please-upgrade-node": "^3.2.0", + "slash": "^3.0.0", + "which-pm-runs": "^1.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + 
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "requires": { + "find-up": "^4.0.0" + } + }, + "slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dev": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "icss-utils": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz", + "integrity": "sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA==", + "dev": true, + "requires": { + "postcss": "^7.0.14" + } + }, + "ieee754": { + "version": "1.1.13", + "resolved": 
"https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==", + "dev": true + }, + "iferr": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz", + "integrity": "sha1-xg7taebY/bazEEofy8ocGS3FtQE=", + "dev": true + }, + "ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true + }, + "import-fresh": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.2.tgz", + "integrity": "sha512-cTPNrlvJT6twpYy+YmKUKrTSjWFs3bjYjAhCwm+z4EOCubZxAuO+hHpRN64TqjEaYSHs7tJAE0w1CKMGmsG/lw==", + "dev": true, + "requires": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "dependencies": { + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true + } + } + }, + "import-local": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-2.0.0.tgz", + "integrity": "sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ==", + "dev": true, + "requires": { + "pkg-dir": "^3.0.0", + "resolve-cwd": "^2.0.0" + } + }, + "imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "dev": true + }, + "indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true + }, 
+ "indexes-of": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz", + "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc=", + "dev": true + }, + "infer-owner": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", + "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", + "dev": true + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "ini": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", + "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==", + "dev": true + }, + "inquirer": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.5.2.tgz", + "integrity": "sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ==", + "dev": true, + "requires": { + "ansi-escapes": "^3.2.0", + "chalk": "^2.4.2", + "cli-cursor": "^2.1.0", + "cli-width": "^2.0.0", + "external-editor": "^3.0.3", + "figures": "^2.0.0", + "lodash": "^4.17.12", + "mute-stream": "0.0.7", + "run-async": "^2.2.0", + "rxjs": "^6.4.0", + "string-width": "^2.1.0", + "strip-ansi": "^5.1.0", + "through": "^2.3.6" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + }, + "string-width": { + "version": "2.1.1", + 
"resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "requires": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "dependencies": { + "strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + } + } + } + } + }, + "instant.page": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/instant.page/-/instant.page-3.0.0.tgz", + "integrity": "sha512-cWTCMZlMZ/CKtY7+Rf3ZgeNtNPP9Yv/glRyriOMym98OhF484N5dRES9qv8q2MgMPmNwUb6T9mEHMdffICbnzw==" + }, + "interpret": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.2.0.tgz", + "integrity": "sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw==", + "dev": true + }, + "invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dev": true, + "requires": { + "loose-envify": "^1.0.0" + } + }, + "invert-kv": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-2.0.0.tgz", + "integrity": "sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==", + "dev": true + }, + "is-accessor-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", + "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + 
"integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + "dev": true + }, + "is-binary-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", + "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", + "dev": true, + "requires": { + "binary-extensions": "^1.0.0" + } + }, + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "is-data-descriptor": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", + "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", + "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^0.1.6", + "is-data-descriptor": "^0.1.4", + "kind-of": "^5.0.0" + }, + "dependencies": { + "kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==", + "dev": true + } + } + }, + "is-extendable": { + "version": 
"0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", + "dev": true + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true + }, + "is-finite": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.1.0.tgz", + "integrity": "sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "dev": true, + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=", + "dev": true + }, + "is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": 
"sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + }, + "is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha1-/S2INUXEa6xaYz57mgnof6LLUGk=", + "dev": true + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "dev": true + }, + "is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "dev": true + }, + "is-wsl": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz", + "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=", + "dev": true + }, + "js-tokens": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz", + "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=", + "dev": true + }, + "js-yaml": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", + "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + 
"jsesc": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-1.3.0.tgz", + "integrity": "sha1-RsP+yMGJKxKwgz25vHYiF226s0s=", + "dev": true + }, + "json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", + "dev": true + }, + "json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", + "dev": true + }, + "json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "dev": true, + "requires": { + "minimist": "^1.2.0" + } + }, + "kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true + }, + "lcid": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/lcid/-/lcid-2.0.0.tgz", + "integrity": 
"sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA==", + "dev": true, + "requires": { + "invert-kv": "^2.0.0" + } + }, + "levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "requires": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + } + }, + "lines-and-columns": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", + "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", + "dev": true + }, + "lint-staged": { + "version": "10.5.1", + "resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-10.5.1.tgz", + "integrity": "sha512-fTkTGFtwFIJJzn/PbUO3RXyEBHIhbfYBE7+rJyLcOXabViaO/h6OslgeK6zpeUtzkDrzkgyAYDTLAwx6JzDTHw==", + "dev": true, + "requires": { + "chalk": "^4.1.0", + "cli-truncate": "^2.1.0", + "commander": "^6.2.0", + "cosmiconfig": "^7.0.0", + "debug": "^4.2.0", + "dedent": "^0.7.0", + "enquirer": "^2.3.6", + "execa": "^4.1.0", + "listr2": "^3.2.2", + "log-symbols": "^4.0.0", + "micromatch": "^4.0.2", + "normalize-path": "^3.0.0", + "please-upgrade-node": "^3.2.0", + "string-argv": "0.3.1", + "stringify-object": "^3.3.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "commander": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.0.tgz", + "integrity": "sha512-zP4jEKbe8SHzKJYQmq8Y9gYjtO/POJLgIdKgV7B9qNmABVFVc+ctqSX6iXh4mCpJfRBOabiZ2YKPg8ciDw6C+Q==", + "dev": true + }, + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "execa": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", + "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", + "dev": true, + "requires": { + "cross-spawn": "^7.0.0", + "get-stream": "^5.0.0", + 
"human-signals": "^1.1.1", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.0", + "onetime": "^5.1.0", + "signal-exit": "^3.0.2", + "strip-final-newline": "^2.0.0" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dev": true, + "requires": { + "pump": "^3.0.0" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "is-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", + "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", + "dev": true + }, + "micromatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", + "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", + "dev": true, + "requires": { + "braces": "^3.0.1", + "picomatch": "^2.0.5" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "requires": { + "path-key": "^3.0.0" + } + }, + "onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true + }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": 
"sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + } + } + }, + "listr2": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/listr2/-/listr2-3.2.2.tgz", + "integrity": "sha512-AajqcZEUikF2ioph6PfH3dIuxJclhr3i3kHgTOP0xeXdWQohrvJAAmqVcV43/GI987HFY/vzT73jYXoa4esDHg==", + "dev": true, + "requires": { + "chalk": "^4.1.0", + "cli-truncate": "^2.1.0", + "figures": "^3.2.0", + "indent-string": "^4.0.0", + "log-update": "^4.0.0", + "p-map": "^4.0.0", + "rxjs": "^6.6.3", + "through": "^2.3.8" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "loader-runner": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz", + "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==", + "dev": true + }, + "loader-utils": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", + "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==", + "dev": true, + "requires": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^1.0.1" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "lodash": { + "version": "4.17.19", + "resolved": 
"https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", + "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==" + }, + "lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4=", + "dev": true + }, + "lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "lodash.unescape": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.unescape/-/lodash.unescape-4.0.1.tgz", + "integrity": "sha1-vyJJiGzlFM2hEvrpIYzcBlIR/Jw=", + "dev": true + }, + "log-symbols": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", + "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", + "dev": true, + "requires": { + "chalk": "^4.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "chalk": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", + "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "log-update": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/log-update/-/log-update-4.0.0.tgz", + "integrity": "sha512-9fkkDevMefjg0mmzWFBW8YkFP91OrizzkW3diF7CpG+S2EYdy4+TVfGwz1zeF8x7hCx1ovSPTOE9Ngib74qqUg==", + "dev": true, + "requires": { + "ansi-escapes": "^4.3.0", + "cli-cursor": "^3.1.0", + "slice-ansi": "^4.0.0", + "wrap-ansi": "^6.2.0" + }, + "dependencies": { + "ansi-escapes": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz", + "integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==", + "dev": true, + "requires": { + "type-fest": "^0.11.0" + } + }, + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "astral-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", + "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", + "dev": true + }, + "cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "requires": { + "restore-cursor": "^3.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true + }, + "onetime": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "requires": { + "mimic-fn": "^2.1.0" + } + }, + "restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "requires": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + } + }, + "slice-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", + "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "astral-regex": "^2.0.0", + "is-fullwidth-code-point": "^3.0.0" + } + }, + "string-width": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", + "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + } + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "type-fest": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz", + "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==", + "dev": true + }, + "wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": 
"sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } + } + } + }, + "loglevel": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.0.tgz", + "integrity": "sha512-i2sY04nal5jDcagM3FMfG++T69GEEM8CYuOfeOIvmXzOIcwE9a/CJPR0MFM97pYMj/u10lzz7/zd7+qwhrBTqQ==", + "dev": true + }, + "loglevel-colored-level-prefix": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/loglevel-colored-level-prefix/-/loglevel-colored-level-prefix-1.0.0.tgz", + "integrity": "sha1-akAhj9x64V/HbD0PPmdsRlOIYD4=", + "dev": true, + "requires": { + "chalk": "^1.1.3", + "loglevel": "^1.4.1" + }, + "dependencies": { + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true, + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true + } + } + }, + "loose-envify": { + "version": 
"1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dev": true, + "requires": { + "js-tokens": "^3.0.0 || ^4.0.0" + } + }, + "lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "requires": { + "yallist": "^3.0.2" + } + }, + "lunr": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.8.tgz", + "integrity": "sha512-oxMeX/Y35PNFuZoHp+jUj5OSEmLCaIH4KTFJh7a93cHBoFmpw2IoPs22VIz7vyO2YUnx2Tn9dzIwO2P/4quIRg==" + }, + "make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "dev": true, + "requires": { + "pify": "^4.0.1", + "semver": "^5.6.0" + } + }, + "make-plural": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/make-plural/-/make-plural-4.3.0.tgz", + "integrity": "sha512-xTYd4JVHpSCW+aqDof6w/MebaMVNTVYBZhbB/vi513xXdiPT92JMVCo0Jq8W2UZnzYRFeVbQiQ+I25l13JuKvA==", + "dev": true, + "requires": { + "minimist": "^1.2.0" + } + }, + "mamacro": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/mamacro/-/mamacro-0.0.3.tgz", + "integrity": "sha512-qMEwh+UujcQ+kbz3T6V+wAmO2U8veoq2w+3wY8MquqwVA3jChfwY+Tk52GZKDfACEPjuZ7r2oJLejwpt8jtwTA==", + "dev": true + }, + "map-age-cleaner": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz", + "integrity": "sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==", + "dev": true, + "requires": { + "p-defer": "^1.0.0" + } + }, + "map-cache": { + "version": "0.2.2", + "resolved": 
"https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", + "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=", + "dev": true + }, + "map-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.1.0.tgz", + "integrity": "sha512-glc9y00wgtwcDmp7GaE/0b0OnxpNJsVf3ael/An6Fe2Q51LLwN1er6sdomLRzz5h0+yMpiYLhWYF5R7HeqVd4g==", + "dev": true + }, + "map-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", + "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", + "dev": true, + "requires": { + "object-visit": "^1.0.0" + } + }, + "mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha1-GA8fnr74sOY45BZq1S24eb6y/8U=", + "dev": true + }, + "md5.js": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", + "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", + "dev": true, + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "mem": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/mem/-/mem-4.3.0.tgz", + "integrity": "sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==", + "dev": true, + "requires": { + "map-age-cleaner": "^0.1.1", + "mimic-fn": "^2.0.0", + "p-is-promise": "^2.0.0" + } + }, + "memory-fs": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz", + "integrity": "sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=", + "dev": true, + "requires": { + "errno": "^0.1.3", + "readable-stream": "^2.0.1" + } + }, + "merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "merge2": { + 
"version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true + }, + "messageformat": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/messageformat/-/messageformat-2.3.0.tgz", + "integrity": "sha512-uTzvsv0lTeQxYI2y1NPa1lItL5VRI8Gb93Y2K2ue5gBPyrbJxfDi/EYWxh2PKv5yO42AJeeqblS9MJSh/IEk4w==", + "dev": true, + "requires": { + "make-plural": "^4.3.0", + "messageformat-formatters": "^2.0.1", + "messageformat-parser": "^4.1.2" + } + }, + "messageformat-formatters": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/messageformat-formatters/-/messageformat-formatters-2.0.1.tgz", + "integrity": "sha512-E/lQRXhtHwGuiQjI7qxkLp8AHbMD5r2217XNe/SREbBlSawe0lOqsFb7rflZJmlQFSULNLIqlcjjsCPlB3m3Mg==", + "dev": true + }, + "messageformat-parser": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/messageformat-parser/-/messageformat-parser-4.1.3.tgz", + "integrity": "sha512-2fU3XDCanRqeOCkn7R5zW5VQHWf+T3hH65SzuqRvjatBK7r4uyFa5mEX+k6F9Bd04LVM5G4/BHBTUJsOdW7uyg==", + "dev": true + }, + "micromatch": { + "version": "3.1.10", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", + "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "dev": true, + "requires": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "braces": "^2.3.1", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "extglob": "^2.0.4", + "fragment-cache": "^0.2.1", + "kind-of": "^6.0.2", + "nanomatch": "^1.2.9", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.2" + } + }, + "miller-rabin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz", + "integrity": 
"sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==", + "dev": true, + "requires": { + "bn.js": "^4.0.0", + "brorand": "^1.0.1" + } + }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true + }, + "minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", + "dev": true + }, + "minimalistic-crypto-utils": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", + "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=", + "dev": true + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "dev": true + }, + "mississippi": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz", + "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==", + "dev": true, + "requires": { + "concat-stream": "^1.5.0", + "duplexify": "^3.4.2", + "end-of-stream": "^1.1.0", + "flush-write-stream": "^1.0.0", + "from2": "^2.1.0", + "parallel-transform": "^1.1.0", + "pump": "^3.0.0", + "pumpify": "^1.3.3", + "stream-each": "^1.1.0", + 
"through2": "^2.0.0" + } + }, + "mixin-deep": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz", + "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==", + "dev": true, + "requires": { + "for-in": "^1.0.2", + "is-extendable": "^1.0.1" + }, + "dependencies": { + "is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "dev": true, + "requires": { + "is-plain-object": "^2.0.4" + } + } + } + }, + "mkdirp": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.3.tgz", + "integrity": "sha512-P+2gwrFqx8lhew375MQHHeTlY8AuOJSrGf0R5ddkEndUkmwpgUob/vQuBD1V22/Cw1/lJr4x+EjllSezBThzBg==", + "dev": true, + "requires": { + "minimist": "^1.2.5" + } + }, + "move-concurrently": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz", + "integrity": "sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=", + "dev": true, + "requires": { + "aproba": "^1.1.1", + "copy-concurrently": "^1.0.0", + "fs-write-stream-atomic": "^1.0.8", + "mkdirp": "^0.5.1", + "rimraf": "^2.5.4", + "run-queue": "^1.0.3" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "mute-stream": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.7.tgz", + "integrity": "sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s=", + "dev": true + }, + "nan": { + "version": "2.14.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.0.tgz", + "integrity": "sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg==", + "dev": true, + "optional": true + }, + "nanomatch": { + 
"version": "1.2.13", + "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", + "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "dev": true, + "requires": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "fragment-cache": "^0.2.1", + "is-windows": "^1.0.2", + "kind-of": "^6.0.2", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + } + }, + "natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", + "dev": true + }, + "neo-async": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.1.tgz", + "integrity": "sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==", + "dev": true + }, + "nice-try": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", + "dev": true + }, + "node-libs-browser": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz", + "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==", + "dev": true, + "requires": { + "assert": "^1.1.1", + "browserify-zlib": "^0.2.0", + "buffer": "^4.3.0", + "console-browserify": "^1.1.0", + "constants-browserify": "^1.0.0", + "crypto-browserify": "^3.11.0", + "domain-browser": "^1.1.1", + "events": "^3.0.0", + "https-browserify": "^1.0.0", + "os-browserify": "^0.3.0", + "path-browserify": "0.0.1", + "process": "^0.11.10", + "punycode": "^1.2.4", + "querystring-es3": "^0.2.0", + "readable-stream": "^2.3.3", + "stream-browserify": "^2.0.1", + 
"stream-http": "^2.7.2", + "string_decoder": "^1.0.0", + "timers-browserify": "^2.0.4", + "tty-browserify": "0.0.0", + "url": "^0.11.0", + "util": "^0.11.0", + "vm-browserify": "^1.0.1" + }, + "dependencies": { + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=", + "dev": true + } + } + }, + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true + }, + "npm-run-path": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", + "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", + "dev": true, + "requires": { + "path-key": "^2.0.0" + } + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "object-copy": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", + "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", + "dev": true, + "requires": { + "copy-descriptor": "^0.1.0", + "define-property": "^0.2.5", + "kind-of": "^3.0.3" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "object-visit": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", + "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", + "dev": true, + "requires": { + "isobject": "^3.0.0" + } + }, + "object.pick": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", + "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", + "dev": true, + "requires": { + "isobject": "^3.0.1" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1" + } + }, + "onetime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", + "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", + "dev": true, + "requires": { + "mimic-fn": "^1.0.0" + }, + "dependencies": { + "mimic-fn": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", + "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", + "dev": true + } + } + }, + "opencollective-postinstall": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz", + "integrity": "sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q==", + "dev": true + }, + "optionator": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", + "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "dev": true, + "requires": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.3" + } + }, + "os-browserify": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", + "integrity": 
"sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=", + "dev": true + }, + "os-homedir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", + "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=", + "dev": true + }, + "os-locale": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-3.1.0.tgz", + "integrity": "sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==", + "dev": true, + "requires": { + "execa": "^1.0.0", + "lcid": "^2.0.0", + "mem": "^4.0.0" + } + }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", + "dev": true + }, + "p-defer": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-defer/-/p-defer-1.0.0.tgz", + "integrity": "sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=", + "dev": true + }, + "p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", + "dev": true + }, + "p-is-promise": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-2.1.0.tgz", + "integrity": "sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==", + "dev": true + }, + "p-limit": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", + "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" + } + }, + "p-map": { + "version": "4.0.0", + 
"resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dev": true, + "requires": { + "aggregate-error": "^3.0.0" + } + }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, + "pako": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", + "dev": true + }, + "parallel-transform": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz", + "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==", + "dev": true, + "requires": { + "cyclist": "^1.0.1", + "inherits": "^2.0.3", + "readable-stream": "^2.1.5" + } + }, + "parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "requires": { + "callsites": "^3.0.0" + } + }, + "parse-asn1": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.5.tgz", + "integrity": "sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==", + "dev": true, + "requires": { + "asn1.js": "^4.0.0", + "browserify-aes": "^1.0.0", + "create-hash": "^1.1.0", + "evp_bytestokey": "^1.0.0", + "pbkdf2": "^3.0.3", + "safe-buffer": "^5.1.1" + } + }, + "parse-json": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.1.0.tgz", + "integrity": 
"sha512-+mi/lmVVNKFNVyLXV31ERiy2CY5E1/F6QtJFEzoChPRwwngMNXRDQ9GJ5WdE2Z2P4AujsOi0/+2qHID68KwfIQ==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, + "parse-passwd": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz", + "integrity": "sha1-bVuTSkVpk7I9N/QKOC1vFmao5cY=", + "dev": true + }, + "pascalcase": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", + "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", + "dev": true + }, + "path-browserify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz", + "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ==", + "dev": true + }, + "path-dirname": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", + "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=", + "dev": true + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=", + "dev": true + }, + "path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "dev": true + }, + "path-type": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true + }, + "pbkdf2": { + "version": "3.0.17", + "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.17.tgz", + "integrity": "sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==", + "dev": true, + "requires": { + "create-hash": "^1.1.2", + "create-hmac": "^1.1.4", + "ripemd160": "^2.0.1", + "safe-buffer": "^5.0.1", + "sha.js": "^2.4.8" + } + }, + "picomatch": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz", + "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==", + "dev": true + }, + "pify": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "dev": true + }, + "pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "dev": true, + "requires": { + "find-up": "^3.0.0" + } + }, + "please-upgrade-node": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/please-upgrade-node/-/please-upgrade-node-3.2.0.tgz", + "integrity": "sha512-gQR3WpIgNIKwBMVLkpMUeR3e1/E1y42bqDQZfql+kDeXd8COYfM8PQA4X6y7a8u9Ua9FHmsrrmirW2vHs45hWg==", + "dev": true, + "requires": { + "semver-compare": "^1.0.0" + } + }, + "posix-character-classes": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", + "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=", + "dev": true + }, + "postcss": { + "version": "7.0.27", + "resolved": 
"https://registry.npmjs.org/postcss/-/postcss-7.0.27.tgz", + "integrity": "sha512-WuQETPMcW9Uf1/22HWUWP9lgsIC+KEHg2kozMflKjbeUtw9ujvFX6QmIfozaErDkmLWS9WEnEdEe6Uo9/BNTdQ==", + "dev": true, + "requires": { + "chalk": "^2.4.2", + "source-map": "^0.6.1", + "supports-color": "^6.1.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "postcss-modules-extract-imports": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz", + "integrity": "sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ==", + "dev": true, + "requires": { + "postcss": "^7.0.5" + } + }, + "postcss-modules-local-by-default": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-3.0.2.tgz", + "integrity": "sha512-jM/V8eqM4oJ/22j0gx4jrp63GSvDH6v86OqyTHHUvk4/k1vceipZsaymiZ5PvocqZOl5SFHiFJqjs3la0wnfIQ==", + "dev": true, + "requires": { + "icss-utils": "^4.1.1", + "postcss": "^7.0.16", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.0.0" + } + }, + "postcss-modules-scope": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz", + "integrity": "sha512-YyEgsTMRpNd+HmyC7H/mh3y+MeFWevy7V1evVhJWewmMbjDHIbZbOXICC2y+m1xI1UVfIT1HMW/O04Hxyu9oXQ==", + "dev": true, + "requires": { + "postcss": "^7.0.6", + "postcss-selector-parser": "^6.0.0" + } + }, + "postcss-modules-values": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-3.0.0.tgz", + "integrity": 
"sha512-1//E5jCBrZ9DmRX+zCtmQtRSV6PV42Ix7Bzj9GbwJceduuf7IqP8MgeTXuRDHOWj2m0VzZD5+roFWDuU8RQjcg==", + "dev": true, + "requires": { + "icss-utils": "^4.0.0", + "postcss": "^7.0.6" + } + }, + "postcss-selector-parser": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.2.tgz", + "integrity": "sha512-36P2QR59jDTOAiIkqEprfJDsoNrvwFei3eCqKd1Y0tUsBimsq39BLp7RD+JWny3WgB1zGhJX8XVePwm9k4wdBg==", + "dev": true, + "requires": { + "cssesc": "^3.0.0", + "indexes-of": "^1.0.1", + "uniq": "^1.0.1" + } + }, + "postcss-value-parser": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.0.3.tgz", + "integrity": "sha512-N7h4pG+Nnu5BEIzyeaaIYWs0LI5XC40OrRh5L60z0QjFsqGWcHcbkBvpe1WYpcIS9yQ8sOi/vIPt1ejQCrMVrg==", + "dev": true + }, + "prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true + }, + "prettier": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.2.0.tgz", + "integrity": "sha512-yYerpkvseM4iKD/BXLYUkQV5aKt4tQPqaGW6EsZjzyu0r7sVZZNPJW4Y8MyKmicp6t42XUPcBVA+H6sB3gqndw==", + "dev": true + }, + "prettier-eslint": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/prettier-eslint/-/prettier-eslint-12.0.0.tgz", + "integrity": "sha512-N8SGGQwAosISXTNl1E57sBbtnqUGlyRWjcfIUxyD3HF4ynehA9GZ8IfJgiep/OfYvCof/JEpy9ZqSl250Wia7A==", + "dev": true, + "requires": { + "@typescript-eslint/parser": "^3.0.0", + "common-tags": "^1.4.0", + "dlv": "^1.1.0", + "eslint": "^7.9.0", + "indent-string": "^4.0.0", + "lodash.merge": "^4.6.0", + "loglevel-colored-level-prefix": "^1.0.0", + "prettier": "^2.0.0", + "pretty-format": "^23.0.1", + "require-relative": "^0.8.7", + "typescript": "^3.9.3", + "vue-eslint-parser": "~7.1.0" + } + }, + 
"prettier-eslint-cli": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/prettier-eslint-cli/-/prettier-eslint-cli-5.0.0.tgz", + "integrity": "sha512-cei9UbN1aTrz3sQs88CWpvY/10PYTevzd76zoG1tdJ164OhmNTFRKPTOZrutVvscoQWzbnLKkviS3gu5JXwvZg==", + "dev": true, + "requires": { + "arrify": "^2.0.1", + "boolify": "^1.0.0", + "camelcase-keys": "^6.0.0", + "chalk": "^2.4.2", + "common-tags": "^1.8.0", + "core-js": "^3.1.4", + "eslint": "^5.0.0", + "find-up": "^4.1.0", + "get-stdin": "^7.0.0", + "glob": "^7.1.4", + "ignore": "^5.1.2", + "lodash.memoize": "^4.1.2", + "loglevel-colored-level-prefix": "^1.0.0", + "messageformat": "^2.2.1", + "prettier-eslint": "^9.0.0", + "rxjs": "^6.5.2", + "yargs": "^13.2.4" + }, + "dependencies": { + "@typescript-eslint/experimental-utils": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-1.13.0.tgz", + "integrity": "sha512-zmpS6SyqG4ZF64ffaJ6uah6tWWWgZ8m+c54XXgwFtUv0jNz8aJAVx8chMCvnk7yl6xwn8d+d96+tWp7fXzTuDg==", + "dev": true, + "requires": { + "@types/json-schema": "^7.0.3", + "@typescript-eslint/typescript-estree": "1.13.0", + "eslint-scope": "^4.0.0" + } + }, + "@typescript-eslint/parser": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-1.13.0.tgz", + "integrity": "sha512-ITMBs52PCPgLb2nGPoeT4iU3HdQZHcPaZVw+7CsFagRJHUhyeTgorEwHXhFf3e7Evzi8oujKNpHc8TONth8AdQ==", + "dev": true, + "requires": { + "@types/eslint-visitor-keys": "^1.0.0", + "@typescript-eslint/experimental-utils": "1.13.0", + "@typescript-eslint/typescript-estree": "1.13.0", + "eslint-visitor-keys": "^1.0.0" + } + }, + "@typescript-eslint/typescript-estree": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-1.13.0.tgz", + "integrity": "sha512-b5rCmd2e6DCC6tCTN9GSUAuxdYwCM/k/2wdjHGrIRGPSJotWMCe/dGpi66u42bhuh8q3QBzqM4TMA1GUUCJvdw==", + "dev": true, + "requires": 
{ + "lodash.unescape": "4.0.1", + "semver": "5.5.0" + }, + "dependencies": { + "semver": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz", + "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==", + "dev": true + } + } + }, + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + }, + "core-js": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.7.0.tgz", + "integrity": "sha512-NwS7fI5M5B85EwpWuIwJN4i/fbisQUwLwiSNUWeXlkAZ0sbBjLEvLvFLf1uzAUV66PcEPt4xCGCmOZSxVf3xzA==", + "dev": true + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "eslint": { + "version": "5.16.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-5.16.0.tgz", + "integrity": "sha512-S3Rz11i7c8AA5JPv7xAH+dOyq/Cu/VXHiHXBPOU1k/JAM5dXqQPt3qcrhpHSorXmrpu2g0gkIBVXAqCpzfoZIg==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "ajv": "^6.9.1", + "chalk": "^2.1.0", + "cross-spawn": "^6.0.5", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "eslint-scope": "^4.0.3", + "eslint-utils": "^1.3.1", + "eslint-visitor-keys": "^1.0.0", + "espree": "^5.0.1", + "esquery": "^1.0.1", + "esutils": "^2.0.2", + "file-entry-cache": "^5.0.1", + "functional-red-black-tree": "^1.0.1", + "glob": "^7.1.2", + "globals": "^11.7.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "inquirer": "^6.2.2", + "js-yaml": "^3.13.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.3.0", + "lodash": "^4.17.11", + "minimatch": "^3.0.4", + "mkdirp": "^0.5.1", + "natural-compare": 
"^1.4.0", + "optionator": "^0.8.2", + "path-is-inside": "^1.0.2", + "progress": "^2.0.0", + "regexpp": "^2.0.1", + "semver": "^5.5.1", + "strip-ansi": "^4.0.0", + "strip-json-comments": "^2.0.1", + "table": "^5.2.3", + "text-table": "^0.2.0" + }, + "dependencies": { + "ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "dev": true + } + } + }, + "eslint-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", + "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", + "dev": true, + "requires": { + "eslint-visitor-keys": "^1.1.0" + } + }, + "espree": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-5.0.1.tgz", + "integrity": "sha512-qWAZcWh4XE/RwzLJejfcofscgMc9CamR6Tn1+XRXNzrvUSSbiAjGOI/fggztjIi7y9VLPqnICMIPiGyr8JaZ0A==", + "dev": true, + "requires": { + "acorn": "^6.0.7", + "acorn-jsx": "^5.0.0", + "eslint-visitor-keys": "^1.0.0" + } + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true + }, + "ignore": { + "version": "5.1.8", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", + "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==", + "dev": true + }, + "levn": { + "version": 
"0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "dev": true, + "requires": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "dev": true + }, + "prettier": { + "version": "1.19.1", + "resolved": 
"https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz", + "integrity": "sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==", + "dev": true + }, + "prettier-eslint": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/prettier-eslint/-/prettier-eslint-9.0.2.tgz", + "integrity": "sha512-u6EQqxUhaGfra9gy9shcR7MT7r/2twwEfRGy1tfzyaJvLQwSg34M9IU5HuF7FsLW2QUgr5VIUc56EPWibw1pdw==", + "dev": true, + "requires": { + "@typescript-eslint/parser": "^1.10.2", + "common-tags": "^1.4.0", + "core-js": "^3.1.4", + "dlv": "^1.1.0", + "eslint": "^5.0.0", + "indent-string": "^4.0.0", + "lodash.merge": "^4.6.0", + "loglevel-colored-level-prefix": "^1.0.0", + "prettier": "^1.7.0", + "pretty-format": "^23.0.1", + "require-relative": "^0.8.7", + "typescript": "^3.2.1", + "vue-eslint-parser": "^2.0.2" + } + }, + "regexpp": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", + "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", + "dev": true + }, + "strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "dev": true + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "dev": true, + "requires": { + "prelude-ls": "~1.1.2" + } + }, + "vue-eslint-parser": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-2.0.3.tgz", + "integrity": 
"sha512-ZezcU71Owm84xVF6gfurBQUGg8WQ+WZGxgDEQu1IHFBZNx7BFZg3L1yHxrCBNNwbwFtE1GuvfJKMtb6Xuwc/Bw==", + "dev": true, + "requires": { + "debug": "^3.1.0", + "eslint-scope": "^3.7.1", + "eslint-visitor-keys": "^1.0.0", + "espree": "^3.5.2", + "esquery": "^1.0.0", + "lodash": "^4.17.4" + }, + "dependencies": { + "acorn": { + "version": "5.7.4", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.4.tgz", + "integrity": "sha512-1D++VG7BhrtvQpNbBzovKNc1FLGGEE/oGe7b9xJm/RFHMBeUaUGpluV9RLjZa47YFdPcDAenEYuq9pQPcMdLJg==", + "dev": true + }, + "acorn-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-3.0.1.tgz", + "integrity": "sha1-r9+UiPsezvyDSPb7IvRk4ypYs2s=", + "dev": true, + "requires": { + "acorn": "^3.0.4" + }, + "dependencies": { + "acorn": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-3.3.0.tgz", + "integrity": "sha1-ReN/s56No/JbruP/U2niu18iAXo=", + "dev": true + } + } + }, + "debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + }, + "eslint-scope": { + "version": "3.7.3", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-3.7.3.tgz", + "integrity": "sha512-W+B0SvF4gamyCTmUc+uITPY0989iXVfKvhwtmJocTaYoc/3khEHmEmvfY/Gn9HA9VV75jrQECsHizkNw1b68FA==", + "dev": true, + "requires": { + "esrecurse": "^4.1.0", + "estraverse": "^4.1.1" + } + }, + "espree": { + "version": "3.5.4", + "resolved": "https://registry.npmjs.org/espree/-/espree-3.5.4.tgz", + "integrity": "sha512-yAcIQxtmMiB/jL32dzEp2enBeidsB7xWPLNiw3IIkpVds1P+h7qF9YwJq1yUNzp2OKXgAprs4F61ih66UsoD1A==", + "dev": true, + "requires": { + "acorn": "^5.5.0", + "acorn-jsx": "^3.0.0" + } + } + } + } + } + }, + "prettier-plugin-go-template": { + "version": "0.0.10", + "resolved": 
"https://registry.npmjs.org/prettier-plugin-go-template/-/prettier-plugin-go-template-0.0.10.tgz", + "integrity": "sha512-TaHPqiMK/zfk+YhvKRf/1WZDgQ6ffnlxJZX5rwphqfxBOVEezZQtYistTB348MKrKnnwKpyXZWpMRo0/KCVB+A==", + "dev": true + }, + "pretty-format": { + "version": "23.6.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-23.6.0.tgz", + "integrity": "sha512-zf9NV1NSlDLDjycnwm6hpFATCGl/K1lt0R/GdkAK2O5LN/rwJoB+Mh93gGJjut4YbmecbfgLWVGSTCr0Ewvvbw==", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0", + "ansi-styles": "^3.2.0" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + } + } + }, + "private": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", + "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==", + "dev": true + }, + "process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=", + "dev": true + }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true + }, + "progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true + }, + "promise-inflight": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=", + "dev": true + }, + "prr": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/prr/-/prr-1.0.1.tgz", + "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=", + "dev": true + }, + "public-encrypt": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz", + "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==", + "dev": true, + "requires": { + "bn.js": "^4.1.0", + "browserify-rsa": "^4.0.0", + "create-hash": "^1.1.0", + "parse-asn1": "^5.0.0", + "randombytes": "^2.0.1", + "safe-buffer": "^5.1.2" + } + }, + "pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "pumpify": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", + "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", + "dev": true, + "requires": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + }, + "dependencies": { + "pump": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + } + } + }, + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "dev": true + }, + "querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", + "dev": true + }, + "querystring-es3": { + "version": 
"0.2.1", + "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz", + "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=", + "dev": true + }, + "quick-lru": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz", + "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", + "dev": true + }, + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "requires": { + "safe-buffer": "^5.1.0" + } + }, + "randomfill": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz", + "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==", + "dev": true, + "requires": { + "randombytes": "^2.0.5", + "safe-buffer": "^5.1.0" + } + }, + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dev": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "readdirp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", + "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.11", + "micromatch": "^3.1.10", + "readable-stream": "^2.0.2" + } + }, + "regenerate": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", + 
"integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==", + "dev": true + }, + "regenerator-runtime": { + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", + "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==", + "dev": true + }, + "regenerator-transform": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.10.1.tgz", + "integrity": "sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==", + "dev": true, + "requires": { + "babel-runtime": "^6.18.0", + "babel-types": "^6.19.0", + "private": "^0.1.6" + } + }, + "regex-not": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", + "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "dev": true, + "requires": { + "extend-shallow": "^3.0.2", + "safe-regex": "^1.1.0" + } + }, + "regexpp": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", + "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", + "dev": true + }, + "regexpu-core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-2.0.0.tgz", + "integrity": "sha1-SdA4g3uNz4v6W5pCE5k45uoq4kA=", + "dev": true, + "requires": { + "regenerate": "^1.2.1", + "regjsgen": "^0.2.0", + "regjsparser": "^0.1.4" + } + }, + "regjsgen": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.2.0.tgz", + "integrity": "sha1-bAFq3qxVT3WCP+N6wFuS1aTtsfc=", + "dev": true + }, + "regjsparser": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.1.5.tgz", + "integrity": 
"sha1-fuj4Tcb6eS0/0K4ijSS9lJ6tIFw=", + "dev": true, + "requires": { + "jsesc": "~0.5.0" + }, + "dependencies": { + "jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", + "dev": true + } + } + }, + "remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", + "dev": true + }, + "repeat-element": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz", + "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==", + "dev": true + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "dev": true + }, + "repeating": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", + "integrity": "sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo=", + "dev": true, + "requires": { + "is-finite": "^1.0.0" + } + }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "dev": true + }, + "require-main-filename": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", + "dev": true + }, + "require-relative": { + "version": "0.8.7", + "resolved": "https://registry.npmjs.org/require-relative/-/require-relative-0.8.7.tgz", + "integrity": "sha1-eZlTn8ngR6N5KPoZb44VY9q9Nt4=", + "dev": true + }, + "resolve-cwd": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-2.0.0.tgz", + "integrity": "sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=", + "dev": true, + "requires": { + "resolve-from": "^3.0.0" + } + }, + "resolve-dir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/resolve-dir/-/resolve-dir-1.0.1.tgz", + "integrity": "sha1-eaQGRMNivoLybv/nOcm7U4IEb0M=", + "dev": true, + "requires": { + "expand-tilde": "^2.0.0", + "global-modules": "^1.0.0" + }, + "dependencies": { + "global-modules": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-1.0.0.tgz", + "integrity": "sha512-sKzpEkf11GpOFuw0Zzjzmt4B4UZwjOcG757PPvrfhxcLFbq0wpsgpOqxpxtxFiCG4DtG93M6XRVbF2oGdev7bg==", + "dev": true, + "requires": { + "global-prefix": "^1.0.1", + "is-windows": "^1.0.1", + "resolve-dir": "^1.0.0" + } + } + } + }, + "resolve-from": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz", + "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=", + "dev": true + }, + "resolve-url": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", + "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=", + "dev": true + }, + "restore-cursor": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", + "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", + "dev": true, + "requires": { + "onetime": "^2.0.0", + "signal-exit": "^3.0.2" + } + }, + "ret": { + "version": "0.1.15", + "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", + "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", + "dev": true + }, + "reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true + }, + "rimraf": { + 
"version": "2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "dev": true, + "requires": { + "glob": "^7.1.3" + } + }, + "ripemd160": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", + "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", + "dev": true, + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "run-async": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", + "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", + "dev": true + }, + "run-parallel": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.1.10.tgz", + "integrity": "sha512-zb/1OuZ6flOlH6tQyMPUrE3x3Ulxjlo9WIVXR4yVYi4H9UXQaeIsPbLn2R3O3vQCnDKkAl2qHiuocKKX4Tz/Sw==", + "dev": true + }, + "run-queue": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz", + "integrity": "sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=", + "dev": true, + "requires": { + "aproba": "^1.1.1" + } + }, + "rxjs": { + "version": "6.6.3", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.3.tgz", + "integrity": "sha512-trsQc+xYYXZ3urjOiJOuCOa5N3jAZ3eiSpQB5hIT8zGlL2QfnHLJ2r7GMkBGuIausdJN1OneaI6gQlsqNHHmZQ==", + "dev": true, + "requires": { + "tslib": "^1.9.0" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "safe-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", + "integrity": 
"sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", + "dev": true, + "requires": { + "ret": "~0.1.10" + } + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "dev": true, + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "dev": true + }, + "semver-compare": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz", + "integrity": "sha1-De4hahyUGrN+nvsXiPavxf9VN/w=", + "dev": true + }, + "semver-regex": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-2.0.0.tgz", + "integrity": "sha512-mUdIBBvdn0PLOeP3TEkMH7HHeUP3GjsXCwKarjv/kGmUFOYg1VqEemKhoQpWMu6X2I8kHeuVdGibLGkVK+/5Qw==", + "dev": true + }, + "serialize-javascript": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz", + "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==", + "dev": true, + "requires": { + "randombytes": "^2.1.0" + } + }, + "set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "set-value": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz", + "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==", + "dev": true, + "requires": { + "extend-shallow": "^2.0.1", + "is-extendable": "^0.1.1", + "is-plain-object": "^2.0.3", + "split-string": "^3.0.1" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "setimmediate": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", + "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU=", + "dev": true + }, + "sha.js": { + "version": "2.4.11", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", + "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "dev": true, + "requires": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dev": true, + "requires": { + "shebang-regex": "^1.0.0" + } + }, + "shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "slash": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", + "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=", + "dev": true + }, + "slice-ansi": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", + "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.0", + "astral-regex": "^1.0.0", + "is-fullwidth-code-point": "^2.0.0" + } + }, + "snapdragon": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", + "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "dev": true, + "requires": { + "base": "^0.11.1", + "debug": "^2.2.0", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "map-cache": "^0.2.2", + "source-map": "^0.5.6", + "source-map-resolve": "^0.5.0", + "use": "^3.1.0" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "dev": true, + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "snapdragon-node": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", + "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "dev": true, + "requires": { + "define-property": "^1.0.0", + "isobject": "^3.0.0", + "snapdragon-util": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "dev": true, + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "is-accessor-descriptor": { + "version": 
"1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "dev": true, + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "dev": true, + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + } + } + }, + "snapdragon-util": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", + "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "dev": true, + "requires": { + "kind-of": "^3.2.0" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "source-list-map": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz", + "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw==", + "dev": true + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + 
}, + "source-map-resolve": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz", + "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==", + "dev": true, + "requires": { + "atob": "^2.1.2", + "decode-uri-component": "^0.2.0", + "resolve-url": "^0.2.1", + "source-map-url": "^0.4.0", + "urix": "^0.1.0" + } + }, + "source-map-support": { + "version": "0.5.19", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", + "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "source-map-url": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz", + "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=", + "dev": true + }, + "split-string": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", + "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "dev": true, + "requires": { + "extend-shallow": "^3.0.0" + } + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "ssri": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.1.tgz", + "integrity": "sha512-3Wge10hNcT1Kur4PDFwEieXSCMCJs/7WvSACcrMYrNp+b8kDL1/0wJch5Ni2WrtwEa2IO8OsVfeKIciKCDx/QA==", + "dev": true, + "requires": { + 
"figgy-pudding": "^3.5.1" + } + }, + "static-extend": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", + "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", + "dev": true, + "requires": { + "define-property": "^0.2.5", + "object-copy": "^0.1.0" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "dev": true, + "requires": { + "is-descriptor": "^0.1.0" + } + } + } + }, + "stream-browserify": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz", + "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==", + "dev": true, + "requires": { + "inherits": "~2.0.1", + "readable-stream": "^2.0.2" + } + }, + "stream-each": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz", + "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "stream-shift": "^1.0.0" + } + }, + "stream-http": { + "version": "2.8.3", + "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz", + "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==", + "dev": true, + "requires": { + "builtin-status-codes": "^3.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.3.6", + "to-arraybuffer": "^1.0.0", + "xtend": "^4.0.0" + } + }, + "stream-shift": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==", + "dev": true + }, + "string-argv": { + "version": "0.3.1", + "resolved": 
"https://registry.npmjs.org/string-argv/-/string-argv-0.3.1.tgz", + "integrity": "sha512-a1uQGz7IyVy9YwhqjZIZu1c8JO8dNIe20xBmSS6qu9kv++k3JGzCVmprbNN5Kn+BgzD5E7YYwg1CcjuJMRNsvg==", + "dev": true + }, + "string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dev": true, + "requires": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + } + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "dev": true, + "requires": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + } + }, + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dev": true, + "requires": { + "ansi-regex": "^4.1.0" + } + }, + "strip-eof": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", + "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", + "dev": true + }, + "strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": 
"sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true + }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true + }, + "style-loader": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/style-loader/-/style-loader-1.1.3.tgz", + "integrity": "sha512-rlkH7X/22yuwFYK357fMN/BxYOorfnfq0eD7+vqlemSK4wEcejFF1dg4zxP0euBW8NrYx2WZzZ8PPFevr7D+Kw==", + "dev": true, + "requires": { + "loader-utils": "^1.2.3", + "schema-utils": "^2.6.4" + }, + "dependencies": { + "schema-utils": { + "version": "2.6.5", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.6.5.tgz", + "integrity": "sha512-5KXuwKziQrTVHh8j/Uxz+QUbxkaLW9X/86NBlx/gnKgtsZA2GIVMUn17qWhRFwF8jdYb3Dig5hRO/W5mZqy6SQ==", + "dev": true, + "requires": { + "ajv": "^6.12.0", + "ajv-keywords": "^3.4.1" + } + } + } + }, + "supports-color": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", + "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "table": { + "version": "5.4.6", + "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", + "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", + "dev": true, + "requires": { + "ajv": "^6.10.2", + "lodash": "^4.17.14", + "slice-ansi": "^2.1.0", + "string-width": "^3.0.0" + } + }, + "tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "dev": true + }, + "terser": { + 
"version": "4.8.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", + "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", + "dev": true, + "requires": { + "commander": "^2.20.0", + "source-map": "~0.6.1", + "source-map-support": "~0.5.12" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "terser-webpack-plugin": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz", + "integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==", + "dev": true, + "requires": { + "cacache": "^12.0.2", + "find-cache-dir": "^2.1.0", + "is-wsl": "^1.1.0", + "schema-utils": "^1.0.0", + "serialize-javascript": "^4.0.0", + "source-map": "^0.6.1", + "terser": "^4.1.2", + "webpack-sources": "^1.4.0", + "worker-farm": "^1.7.0" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", + "dev": true + }, + "through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", + "dev": true + }, + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": 
"sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "requires": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "timers-browserify": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.11.tgz", + "integrity": "sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ==", + "dev": true, + "requires": { + "setimmediate": "^1.0.4" + } + }, + "tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dev": true, + "requires": { + "os-tmpdir": "~1.0.2" + } + }, + "to-arraybuffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz", + "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M=", + "dev": true + }, + "to-fast-properties": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz", + "integrity": "sha1-uDVx+k2MJbguIxsG46MFXeTKGkc=", + "dev": true + }, + "to-object-path": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", + "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", + "dev": true, + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "to-regex": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", + "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "dev": true, + "requires": { + "define-property": "^2.0.2", + 
"extend-shallow": "^3.0.2", + "regex-not": "^1.0.2", + "safe-regex": "^1.1.0" + } + }, + "to-regex-range": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", + "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", + "dev": true, + "requires": { + "is-number": "^3.0.0", + "repeat-string": "^1.6.1" + } + }, + "trim-right": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/trim-right/-/trim-right-1.0.1.tgz", + "integrity": "sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM=", + "dev": true + }, + "tslib": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.11.1.tgz", + "integrity": "sha512-aZW88SY8kQbU7gpV19lN24LtXh/yD4ZZg6qieAJDDg+YBsJcSmLGK9QpnUjAKVG/xefmvJGd1WUmfpT/g6AJGA==", + "dev": true + }, + "tsutils": { + "version": "3.17.1", + "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.17.1.tgz", + "integrity": "sha512-kzeQ5B8H3w60nFY2g8cJIuH7JDpsALXySGtwGJ0p2LSjLgay3NdIpqq5SoOBe46bKDW2iq25irHCr8wjomUS2g==", + "dev": true, + "requires": { + "tslib": "^1.8.1" + } + }, + "tty-browserify": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz", + "integrity": "sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY=", + "dev": true + }, + "type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "requires": { + "prelude-ls": "^1.2.1" + } + }, + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", + "dev": 
true + }, + "typescript": { + "version": "3.9.7", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.9.7.tgz", + "integrity": "sha512-BLbiRkiBzAwsjut4x/dsibSTB6yWpwT5qWmC2OfuCg3GgVQCSgMs4vEctYPhsaGtd0AeuuHMkjZ2h2WG8MSzRw==", + "dev": true + }, + "union-value": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz", + "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==", + "dev": true, + "requires": { + "arr-union": "^3.1.0", + "get-value": "^2.0.6", + "is-extendable": "^0.1.1", + "set-value": "^2.0.1" + } + }, + "uniq": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz", + "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8=", + "dev": true + }, + "unique-filename": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", + "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", + "dev": true, + "requires": { + "unique-slug": "^2.0.0" + } + }, + "unique-slug": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", + "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4" + } + }, + "unset-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", + "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", + "dev": true, + "requires": { + "has-value": "^0.3.1", + "isobject": "^3.0.0" + }, + "dependencies": { + "has-value": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", + "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", + "dev": true, + "requires": { + "get-value": "^2.0.3", + "has-values": "^0.1.4", + "isobject": "^2.0.0" + }, + "dependencies": { + 
"isobject": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "dev": true, + "requires": { + "isarray": "1.0.0" + } + } + } + }, + "has-values": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", + "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=", + "dev": true + } + } + }, + "upath": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz", + "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==", + "dev": true + }, + "uri-js": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", + "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "dev": true, + "requires": { + "punycode": "^2.1.0" + } + }, + "urix": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", + "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=", + "dev": true + }, + "url": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", + "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", + "dev": true, + "requires": { + "punycode": "1.3.2", + "querystring": "0.2.0" + }, + "dependencies": { + "punycode": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", + "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=", + "dev": true + } + } + }, + "use": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", + "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==", + "dev": true + }, + "util": { + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/util/-/util-0.11.1.tgz", + "integrity": 
"sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==", + "dev": true, + "requires": { + "inherits": "2.0.3" + }, + "dependencies": { + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + } + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "v8-compile-cache": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.0.3.tgz", + "integrity": "sha512-CNmdbwQMBjwr9Gsmohvm0pbL954tJrNzf6gWL3K+QMQf00PF7ERGrEiLgjuU3mKreLC2MeGhUsNV9ybTbLgd3w==", + "dev": true + }, + "vm-browserify": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz", + "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==", + "dev": true + }, + "vue-eslint-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-7.1.1.tgz", + "integrity": "sha512-8FdXi0gieEwh1IprIBafpiJWcApwrU+l2FEj8c1HtHFdNXMd0+2jUSjBVmcQYohf/E72irwAXEXLga6TQcB3FA==", + "dev": true, + "requires": { + "debug": "^4.1.1", + "eslint-scope": "^5.0.0", + "eslint-visitor-keys": "^1.1.0", + "espree": "^6.2.1", + "esquery": "^1.0.1", + "lodash": "^4.17.15" + }, + "dependencies": { + "acorn": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", + "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==", + "dev": true + }, + "debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": 
"sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + } + }, + "eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "requires": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + } + }, + "espree": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz", + "integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==", + "dev": true, + "requires": { + "acorn": "^7.1.1", + "acorn-jsx": "^5.2.0", + "eslint-visitor-keys": "^1.1.0" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "watchpack": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.6.0.tgz", + "integrity": "sha512-i6dHe3EyLjMmDlU1/bGQpEw25XSjkJULPuAVKCbNRefQVq48yXKUpwg538F7AZTf9kyr57zj++pQFltUa5H7yA==", + "dev": true, + "requires": { + "chokidar": "^2.0.2", + "graceful-fs": "^4.1.2", + "neo-async": "^2.5.0" + } + }, + "webpack": { + "version": "4.42.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.42.0.tgz", + "integrity": "sha512-EzJRHvwQyBiYrYqhyjW9AqM90dE4+s1/XtCfn7uWg6cS72zH+2VPFAlsnW0+W0cDi0XRjNKUMoJtpSi50+Ph6w==", + "dev": true, + "requires": { + "@webassemblyjs/ast": "1.8.5", + "@webassemblyjs/helper-module-context": "1.8.5", + "@webassemblyjs/wasm-edit": "1.8.5", + "@webassemblyjs/wasm-parser": "1.8.5", + "acorn": "^6.2.1", + "ajv": "^6.10.2", + "ajv-keywords": "^3.4.1", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^4.1.0", + "eslint-scope": "^4.0.3", + 
"json-parse-better-errors": "^1.0.2", + "loader-runner": "^2.4.0", + "loader-utils": "^1.2.3", + "memory-fs": "^0.4.1", + "micromatch": "^3.1.10", + "mkdirp": "^0.5.1", + "neo-async": "^2.6.1", + "node-libs-browser": "^2.2.1", + "schema-utils": "^1.0.0", + "tapable": "^1.1.3", + "terser-webpack-plugin": "^1.4.3", + "watchpack": "^1.6.0", + "webpack-sources": "^1.4.1" + } + }, + "webpack-cli": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-3.3.11.tgz", + "integrity": "sha512-dXlfuml7xvAFwYUPsrtQAA9e4DOe58gnzSxhgrO/ZM/gyXTBowrsYeubyN4mqGhYdpXMFNyQ6emjJS9M7OBd4g==", + "dev": true, + "requires": { + "chalk": "2.4.2", + "cross-spawn": "6.0.5", + "enhanced-resolve": "4.1.0", + "findup-sync": "3.0.0", + "global-modules": "2.0.0", + "import-local": "2.0.0", + "interpret": "1.2.0", + "loader-utils": "1.2.3", + "supports-color": "6.1.0", + "v8-compile-cache": "2.0.3", + "yargs": "13.2.4" + }, + "dependencies": { + "emojis-list": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", + "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=", + "dev": true + }, + "enhanced-resolve": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.0.tgz", + "integrity": "sha512-F/7vkyTtyc/llOIn8oWclcB25KdRaiPBpZYDgJHgh/UHtpgT2p2eldQgtQnLtUvfMKPKxbRaQM/hHkvLHt1Vng==", + "dev": true, + "requires": { + "graceful-fs": "^4.1.2", + "memory-fs": "^0.4.0", + "tapable": "^1.0.0" + } + }, + "loader-utils": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.2.3.tgz", + "integrity": "sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA==", + "dev": true, + "requires": { + "big.js": "^5.2.2", + "emojis-list": "^2.0.0", + "json5": "^1.0.1" + } + } + } + }, + "webpack-merge": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-4.2.2.tgz", 
+ "integrity": "sha512-TUE1UGoTX2Cd42j3krGYqObZbOD+xF7u28WB7tfUordytSjbWTIjK/8V0amkBfTYN4/pB/GIDlJZZ657BGG19g==", + "requires": { + "lodash": "^4.17.15" + } + }, + "webpack-sources": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz", + "integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==", + "dev": true, + "requires": { + "source-list-map": "^2.0.0", + "source-map": "~0.6.1" + }, + "dependencies": { + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + } + } + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "which-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", + "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", + "dev": true + }, + "which-pm-runs": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/which-pm-runs/-/which-pm-runs-1.0.0.tgz", + "integrity": "sha1-Zws6+8VS4LVd9rd4DKdGFfI60cs=", + "dev": true + }, + "word-wrap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "dev": true + }, + "worker-farm": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz", + "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==", + "dev": true, + "requires": { + "errno": "~0.1.7" + } + }, + 
"wrap-ansi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "write": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", + "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", + "dev": true, + "requires": { + "mkdirp": "^0.5.1" + } + }, + "xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true + }, + "y18n": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", + "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==", + "dev": true + }, + "yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "yaml": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.0.tgz", + "integrity": "sha512-yr2icI4glYaNG+KWONODapy2/jDdMSDnrONSjblABjD9B4Z5LgiircSt8m8sRZFNi08kG9Sm0uSHtEmP3zaEGg==", + "dev": true + }, + "yargs": { + "version": "13.2.4", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.2.4.tgz", + "integrity": "sha512-HG/DWAJa1PAnHT9JAhNa8AbAv3FPaiLzioSjCcmuXXhP8MlpHO5vwls4g4j6n30Z74GVQj8Xa62dWVx1QCGklg==", + "dev": true, + "requires": { + 
"cliui": "^5.0.0", + "find-up": "^3.0.0", + "get-caller-file": "^2.0.1", + "os-locale": "^3.1.0", + "require-directory": "^2.1.1", + "require-main-filename": "^2.0.0", + "set-blocking": "^2.0.0", + "string-width": "^3.0.0", + "which-module": "^2.0.0", + "y18n": "^4.0.0", + "yargs-parser": "^13.1.0" + } + }, + "yargs-parser": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", + "dev": true, + "requires": { + "camelcase": "^5.0.0", + "decamelize": "^1.2.0" + } + } + } +} diff --git a/blog/themes/harbor/package.json b/blog/themes/harbor/package.json new file mode 100644 index 0000000..85eadc9 --- /dev/null +++ b/blog/themes/harbor/package.json @@ -0,0 +1,52 @@ +{ + "name": "harbor", + "version": "1.0.0", + "description": "Simple and minimal personal blog theme for [Hugo](https://gohugo.io/).", + "main": "index.js", + "scripts": { + "format": "prettier-eslint --write $PWD/'static/src/**/*.js' $PWD/'layouts/**/*.html'; eslint $PWD/'static/src/**/*.js'", + "build-dev": "webpack --config webpack.development.js", + "build-prod": "webpack --config webpack.production.js", + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [], + "author": "matsuyoshi30", + "license": "MIT", + "dependencies": { + "instant.page": "^3.0.0", + "lunr": "^2.3.8", + "webpack-merge": "^4.2.2" + }, + "devDependencies": { + "@typescript-eslint/eslint-plugin": "^4.8.1", + "@typescript-eslint/parser": "^3.10.1", + "babel-core": "^6.26.3", + "babel-loader": "^7.1.5", + "babel-preset-env": "^1.7.0", + "css-loader": "^3.4.2", + "eslint": "^7.14.0", + "husky": "^4.3.0", + "lint-staged": "^10.5.1", + "mark.js": "^8.11.1", + "prettier": "^2.2.0", + "prettier-eslint": "^12.0.0", + "prettier-eslint-cli": "^5.0.0", + "prettier-plugin-go-template": "^0.0.10", + "style-loader": "^1.1.3", + "webpack": "^4.42.0", + 
"webpack-cli": "^3.3.11" + }, + "husky": { + "hooks": { + "pre-commit": "lint-staged" + } + }, + "lint-staged": { + "!(bundle).html": [ + "prettier-eslint --write $PWD/'static/src/**/*.js $PWD/'layouts/**/*.html'" + ], + "!(bundle).js": [ + "eslint $PWD/'static/src/**/*.js'" + ] + } +} diff --git a/blog/themes/harbor/sonar-project.properties b/blog/themes/harbor/sonar-project.properties new file mode 100644 index 0000000..710af87 --- /dev/null +++ b/blog/themes/harbor/sonar-project.properties @@ -0,0 +1,3 @@ +sonar.host.url=https://sonarcloud.io +sonar.organization=matsuyoshi30 +sonar.projectKey=matsuyoshi30_harbor \ No newline at end of file diff --git a/blog/themes/harbor/static/css/dark.css b/blog/themes/harbor/static/css/dark.css new file mode 100644 index 0000000..627c5be --- /dev/null +++ b/blog/themes/harbor/static/css/dark.css @@ -0,0 +1,11 @@ +html { + background-color: #171717 !important; +} +body { + filter: invert(100%) hue-rotate(180deg) brightness(105%) contrast(85%); + -webkit-filter: invert(100%) hue-rotate(180deg) brightness(105%) contrast(85%); +} +img, video, iframe, body * [style*="background-image"] { + filter: hue-rotate(180deg) contrast(100%) invert(100%); + -webkit-filter: hue-rotate(180deg) contrast(100%) invert(100%); +} \ No newline at end of file diff --git a/blog/themes/harbor/static/css/main.css b/blog/themes/harbor/static/css/main.css new file mode 100644 index 0000000..add6d79 --- /dev/null +++ b/blog/themes/harbor/static/css/main.css @@ -0,0 +1,522 @@ +/* noto-sans-jp-regular - japanese_latin */ +@font-face { + font-family: 'Noto Sans JP'; + font-style: normal; + font-weight: 400; + font-display: swap; + src: local('Noto Sans Japanese Regular'), local('NotoSansJapanese-Regular'), + url('../fonts/noto-sans-jp-v25-japanese_latin-regular.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/noto-sans-jp-v25-japanese_latin-regular.woff') format('woff'); /* Modern Browsers */ +} + +/* roboto-regular - latin */ +@font-face 
{ + font-family: 'Roboto'; + font-style: normal; + font-weight: 400; + font-display: swap; + src: local('Roboto'), local('Roboto-Regular'), + url('../fonts/roboto-v20-latin-regular.woff2') format('woff2'), /* Super Modern Browsers */ + url('../fonts/roboto-v20-latin-regular.woff') format('woff'); /* Modern Browsers */ +} + +html { + background-color: #FFFFFF; +} + +body { + margin: 0; + padding: 0; + font-family: 'Noto Sans JP', sans-serif; + font-size: 16px; + color: #42464c; + background: 0 0; + flex-flow: column; + text-rendering: optimizeLegibility; +} + +a { + outline: none; + text-decoration: none; +} + +.error-text { + font-family: 'Roboto', Helvetica, sans-serif; + text-align: center; +} + +.header { + margin: auto; + position: relative; +} + +.navbar { + min-height: 50px; + margin-bottom: 20px; +} + +.nav { + top: 0; + position: relative; + max-width: 800px; + margin: 20px auto; + padding: 0 10px; + text-align: right; +} + +.nav-logo { + float: left; + transition: transform 300ms ease-out; +} + +.nav-logo:hover { + transform: scale(1.1); +} + +.nav-logo img { + display: block; + width: auto; +} + +.nav-links { + margin: 0; + padding: 0; + font-size: 14px; + list-style: none; +} + +.nav-links li { + display: inline-block; + margin: 0 0 0 10px; +} + +.nav-links li a em { + color: #000000; +} + +.intro-header { + margin: 40px 0 20px; + position: relative; +} + +.intro-header [class$="-heading"] { + text-align: center; +} + +.intro-header [class$="-heading"] h1 { + margin-top: 0; + padding-top: 0; + font-size: 50px; +} + +h1,h2,h3,h4,h5,h6 { + font-family: 'Roboto', Helvetica, sans-serif; + font-weight: 800; + color: #111111; +} + +.container[role=main] { + max-width: 700px; + padding: 0 15px; + font-size: 16px; + line-height: 1.7; + color: #333333; +} + +.container img { + width: 100%; +} + +#blog-archives { + margin: 20px auto; + font-size: 14px; +} + +.archives { + margin: 20px auto; +} + +.archives td { + border: none; + text-align: left; +} + +.article { 
+ text-align: justify; +} + +#TableOfContents { + font-size: 14px; + border: 2px dotted #cccccc; + margin: 1em 0; + padding: 0.5em 0; + background-color: #f0f0f0; +} + +#TableOfContents ul { + list-style-type: none; +} + +#TableOfContents ul ul { + list-style-type: disc; +} + +p { + line-height: 1.5; + margin: 0.5em 0; +} + +p + p { + margin-top: 1em; +} + +.social-icon { + margin-left: 0.2em; + margin-right: 0.2em; +} + +.post-preview { + padding-bottom: 10px; + border-bottom: 1px solid #eeeeee; +} + +.post-preview a { + text-decoration: none; + color: #222222; +} + +.post-preview:last-child { + border-bottom: 0; +} + +.postmeta { + margin: 10px 0; +} + +.blog-tags { + font-family: 'Roboto', Helvetica, sans-serif; + color: #999999; + font-size: 15px; + margin: 30px 0; +} + +.blog-tags a { + color: #0000BB; + text-decoration: none; + padding: 0px 5px; +} + +.blog-tags a:before { + content: "#"; +} + +h4.term-name > span.badge { + float: right; +} + +div.panel-body { + font-family: 'Roboto', Helvetica, sans-serif; + font-weight: 800; + border-radius: 0; + border: none; + font-size: 16px; +} + +.post-entry { + width: 100%; + margin-top: 10px; +} + +.post-read-more { + font-family: 'Roboto', Helvetica, sans-serif; + font-weight: 800; + float: right; + position: relative; + display: block; + text-decoration: none; +} + +a.post-read-more::after { + position: absolute; + bottom: -4px; + left: 0; + content: ''; + width: 100%; + height: 2px; + background: #333; + transform: scale(0, 1); + transform-origin: center top; + transition: transform .3s; +} + +a.post-read-more:hover::after { + transform: scale(1, 1); +} + +blockquote { + color: #808080; + padding: 0 10px; + border-left: 4px solid #aaaaaa; +} + +blockquote p:first-child { + margin-top: 0; +} + +table { + padding: 0; + border-spacing: 0; +} + +table tr { + border-top: 1px solid #dddddd; + margin: 0; + padding: 0; +} + +table tr th { + font-weight: bold; + border: 1px solid #dddddd; + text-align: left; + margin: 0; + 
padding: 6px 13px; +} + +table tr td { + border: 1px solid #dddddd; + text-align: left; + margin: 0; + padding: 6px 12px; +} + +table tr th :first-child, +table tr td :first-child { + margin-top: 0; +} + +table tr th :last-child, +table tr td :last-child { + margin-bottom: 0; +} + +.chroma .ln { + margin-right: 0.8em; + padding: 0 0.4em 0 0.4em; +} + +pre { + display: block; + padding: 9.5px; + margin: 0 0 10px; + font-size: 13px; + line-height: 1.42857143; + color: #333; + word-break: break-all; + word-wrap: break-word; + background-color: #f5f5f5; + border: 1px solid #cccccc; + border-radius: 4px; +} + +pre code { + padding: 0; + font-family: Menlo, Monaco, Consolas, monospace; + font-size: inherit; + color: inherit; + white-space: pre-wrap; + background-color: transparent; + border-radius: 0; +} + +code { + padding: 2px 4px; + font-size: 90%; + color: #dd0011; + background-color: #f9f9f9; + border-radius: 4px; +} + +#backtotopButton { + position: fixed; + bottom: 20px; + right: 20px; + z-index: 99; + border: none; + outline: none; + background-color: #eeeeff; + cursor: pointer; + padding: 15px; + border-radius: 10px; + font-size: 16px; + text-align: center; +} + +#backtotopButton:hover { + background-color: #aaaaaa; +} + +.searchBoxContainer { + position: relative; + width: 300px; + height: 30px; + margin: 10px auto 50px auto; +} + +input.searchBox { + position: absolute; + width: 100%; + padding: 0 35px 0 15px; + top: 0; + left: 0; + right: 0; + bottom: 0; + border-radius: 15px; + outline: 0; + font-size: 16px; + color: #707070; + background-color:#f6f6f6; + border: solid 1px #c9c9c9; + box-sizing: border-box; +} + +.searchBox::placeholder { + color: #c9c9c9; +} + +.searchResults { + display: none; + max-width: 600px; + min-width: 300px; + margin: 0 auto; + top: 210px; + left: 0; + right: 0; + padding: 5px; + border-radius: 5px; + text-align: left; +} + +.searchResultPage { + padding: 14px +} + +.searchResultTitle { + font-family: 'Roboto', Helvetica, 
sans-serif; + font-weight: bold; + font-size: 24px; + margin: 5px 0; +} + +.searchResultBody { + font-size: 16px; +} + +mark { + background-color: #eeff00; +} + +.pager { + list-style: none; + text-align: center; + margin:20px 0 0; + padding-left: 0; +} + +.pager ul { + display: block; +} + +.pager li { + display: inline; +} + +.pager li a { + box-sizing: border-box; + font-family: 'Roboto', Helvetica, sans-serif; + text-transform: uppercase; + text-align: center; + font-size: 14px; + font-weight: 800; + letter-spacing: 1px; + padding: 10px 5px; + background: #ffffff; + border-radius: 0; + border: 1px solid #dddddd; + display: inline-block; + color: #404040; + text-decoration: none; +} + +.pager a:hover:not(.active) { + background-color: #dddddd; +} + +.pager .previous > a { + float: left; + display: block; +} + +.pager .next > a { + float: right; + display: block; +} + +footer { + padding: 60px 0; + text-align: center; + margin-top: auto; + font-size: 14px; + font-family: 'Roboto', Helvetica, sans-serif; +} + +footer .copyright { + font-family: 'Roboto', Helvetica, sans-serif; + text-align: center; + margin-bottom: 0; +} + +footer .theme-by { + text-align: center; + margin: 10px 0 0; +} + +footer a { + color: #050505; + font-weight: bold; +} + +footer em { + cursor: pointer; +} + +@media (min-width: 600px) { + .header { + margin: auto; + } + + .nav-links { + font-size: 18px; + } + + .nav-links li { + margin: 0 0 0 30px; + } + + .container[role=main] { + font-size: 16px; + line-height: 1.8; + margin: 40px auto; + } + + .blog-tags { + margin: 20px 0; + } + + .pager li a { + padding: 10px 20px; + } + + .pager.blog-pager { + margin-top: 40px; + } +} diff --git a/blog/themes/harbor/static/css/syntax.css b/blog/themes/harbor/static/css/syntax.css new file mode 100644 index 0000000..cbf8b7f --- /dev/null +++ b/blog/themes/harbor/static/css/syntax.css @@ -0,0 +1,82 @@ +/* Background */ .chroma { background-color: #f8f8f8 } +/* Other */ .chroma .x { color: #000000 } +/* 
Error */ .chroma .err { color: #a40000 } +/* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; } +/* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; width: auto; overflow: auto; display: block; } +/* LineHighlight */ .chroma .hl { display: block; width: 100%;background-color: #ffffcc } +/* LineNumbersTable */ .chroma .lnt { margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f } +/* LineNumbers */ .chroma .ln { margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f } +/* Keyword */ .chroma .k { color: #204a87; font-weight: bold } +/* KeywordConstant */ .chroma .kc { color: #204a87; font-weight: bold } +/* KeywordDeclaration */ .chroma .kd { color: #204a87; font-weight: bold } +/* KeywordNamespace */ .chroma .kn { color: #204a87; font-weight: bold } +/* KeywordPseudo */ .chroma .kp { color: #204a87; font-weight: bold } +/* KeywordReserved */ .chroma .kr { color: #204a87; font-weight: bold } +/* KeywordType */ .chroma .kt { color: #204a87; font-weight: bold } +/* Name */ .chroma .n { color: #000000 } +/* NameAttribute */ .chroma .na { color: #c4a000 } +/* NameBuiltin */ .chroma .nb { color: #204a87 } +/* NameBuiltinPseudo */ .chroma .bp { color: #3465a4 } +/* NameClass */ .chroma .nc { color: #000000 } +/* NameConstant */ .chroma .no { color: #000000 } +/* NameDecorator */ .chroma .nd { color: #5c35cc; font-weight: bold } +/* NameEntity */ .chroma .ni { color: #ce5c00 } +/* NameException */ .chroma .ne { color: #cc0000; font-weight: bold } +/* NameFunction */ .chroma .nf { color: #000000 } +/* NameFunctionMagic */ .chroma .fm { color: #000000 } +/* NameLabel */ .chroma .nl { color: #f57900 } +/* NameNamespace */ .chroma .nn { color: #000000 } +/* NameOther */ .chroma .nx { color: #000000 } +/* NameProperty */ .chroma .py { color: #000000 } +/* NameTag */ .chroma .nt { color: #204a87; font-weight: bold } +/* NameVariable */ .chroma .nv { color: #000000 } +/* NameVariableClass */ 
.chroma .vc { color: #000000 } +/* NameVariableGlobal */ .chroma .vg { color: #000000 } +/* NameVariableInstance */ .chroma .vi { color: #000000 } +/* NameVariableMagic */ .chroma .vm { color: #000000 } +/* Literal */ .chroma .l { color: #000000 } +/* LiteralDate */ .chroma .ld { color: #000000 } +/* LiteralString */ .chroma .s { color: #4e9a06 } +/* LiteralStringAffix */ .chroma .sa { color: #4e9a06 } +/* LiteralStringBacktick */ .chroma .sb { color: #4e9a06 } +/* LiteralStringChar */ .chroma .sc { color: #4e9a06 } +/* LiteralStringDelimiter */ .chroma .dl { color: #4e9a06 } +/* LiteralStringDoc */ .chroma .sd { color: #8f5902; font-style: italic } +/* LiteralStringDouble */ .chroma .s2 { color: #4e9a06 } +/* LiteralStringEscape */ .chroma .se { color: #4e9a06 } +/* LiteralStringHeredoc */ .chroma .sh { color: #4e9a06 } +/* LiteralStringInterpol */ .chroma .si { color: #4e9a06 } +/* LiteralStringOther */ .chroma .sx { color: #4e9a06 } +/* LiteralStringRegex */ .chroma .sr { color: #4e9a06 } +/* LiteralStringSingle */ .chroma .s1 { color: #4e9a06 } +/* LiteralStringSymbol */ .chroma .ss { color: #4e9a06 } +/* LiteralNumber */ .chroma .m { color: #0000cf; font-weight: bold } +/* LiteralNumberBin */ .chroma .mb { color: #0000cf; font-weight: bold } +/* LiteralNumberFloat */ .chroma .mf { color: #0000cf; font-weight: bold } +/* LiteralNumberHex */ .chroma .mh { color: #0000cf; font-weight: bold } +/* LiteralNumberInteger */ .chroma .mi { color: #0000cf; font-weight: bold } +/* LiteralNumberIntegerLong */ .chroma .il { color: #0000cf; font-weight: bold } +/* LiteralNumberOct */ .chroma .mo { color: #0000cf; font-weight: bold } +/* Operator */ .chroma .o { color: #ce5c00; font-weight: bold } +/* OperatorWord */ .chroma .ow { color: #204a87; font-weight: bold } +/* Punctuation */ .chroma .p { color: #000000; font-weight: bold } +/* Comment */ .chroma .c { color: #8f5902; font-style: italic } +/* CommentHashbang */ .chroma .ch { color: #8f5902; font-style: italic } +/* 
CommentMultiline */ .chroma .cm { color: #8f5902; font-style: italic } +/* CommentSingle */ .chroma .c1 { color: #8f5902; font-style: italic } +/* CommentSpecial */ .chroma .cs { color: #8f5902; font-style: italic } +/* CommentPreproc */ .chroma .cp { color: #8f5902; font-style: italic } +/* CommentPreprocFile */ .chroma .cpf { color: #8f5902; font-style: italic } +/* Generic */ .chroma .g { color: #000000 } +/* GenericDeleted */ .chroma .gd { color: #a40000 } +/* GenericEmph */ .chroma .ge { color: #000000; font-style: italic } +/* GenericError */ .chroma .gr { color: #ef2929 } +/* GenericHeading */ .chroma .gh { color: #000080; font-weight: bold } +/* GenericInserted */ .chroma .gi { color: #00a000 } +/* GenericOutput */ .chroma .go { color: #000000; font-style: italic } +/* GenericPrompt */ .chroma .gp { color: #8f5902 } +/* GenericStrong */ .chroma .gs { color: #000000; font-weight: bold } +/* GenericSubheading */ .chroma .gu { color: #800080; font-weight: bold } +/* GenericTraceback */ .chroma .gt { color: #a40000; font-weight: bold } +/* GenericUnderline */ .chroma .gl { color: #000000; text-decoration: underline } +/* TextWhitespace */ .chroma .w { color: #f8f8f8; text-decoration: underline } diff --git a/blog/themes/harbor/static/favicon.ico b/blog/themes/harbor/static/favicon.ico new file mode 100644 index 0000000..ab71f79 Binary files /dev/null and b/blog/themes/harbor/static/favicon.ico differ diff --git a/blog/themes/harbor/static/fontawesome/css/all.min.css b/blog/themes/harbor/static/fontawesome/css/all.min.css new file mode 100644 index 0000000..d16a4d5 --- /dev/null +++ b/blog/themes/harbor/static/fontawesome/css/all.min.css @@ -0,0 +1,5 @@ +/*! 
+ * Font Awesome Free 5.12.1 by @fontawesome - https://fontawesome.com + * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) + */ +.fa,.fab,.fad,.fal,.far,.fas{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:inline-block;font-style:normal;font-variant:normal;text-rendering:auto;line-height:1}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-.0667em}.fa-xs{font-size:.75em}.fa-sm{font-size:.875em}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:2.5em;padding-left:0}.fa-ul>li{position:relative}.fa-li{left:-2em;position:absolute;text-align:center;width:2em;line-height:inherit}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-webkit-transform:scaleY(-1);transform:scaleY(-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical,.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1);transform:scale(-1)}:root .fa-flip-both,:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{-webkit-filter:none;filter:none}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-500px:before{content:"\f26e"}.fa-accessible-icon:before{content:"\f368"}.fa-accusoft:before{content:"\f369"}.fa-acquisitions-incorporated:before{content:"\f6af"}.fa-ad:before{content:"\f641"}.fa-address-book:before{content:"\f2b9"}.fa-address-card:before{content:"\f2bb"}.fa-adjust:before{content:"\f042"}.fa-adn:before{content:"\f170"}.fa-adobe:before{content:"\f778"}.fa-adversal:before{content:"\f36a"}.fa-affiliatetheme:before{content:"\f36b"}.fa-air-freshener:before{content:"\f5d0"}.fa-airbnb:before{content:"\f834"}.fa-algolia:before{content:"\f36c"}.fa-align-center:before{content:"\f037"}.fa-align-justify:before{content:"\f039"}.fa-align-left:before{content:"\f036"}.fa-align-right:before{content:"\f038"}.fa-alipay:before{content:"\f642"}.fa-allergies:before{content:"\f461"}.fa-amazon:before{content:"\f270"}.fa-amazon-pay:before{content:"\f42c"}.fa-ambulance:before{content:"\f0f9"}.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-amilia:before{content:"\f36d"}.fa-anchor:before{content:"\f13d"}.fa-android:before{content:"\f17b"}.fa-angellist:before{content:"\f209"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-down:before{content:"\f107"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angry:before{content:"\f556"}.fa-angrycreative:before{content:"\f36e"}.fa-angular:before{content:"\f420"}.fa-ankh:before{content:"\f644"}.fa-app-store:before{content:"\f36f"}.fa-app-store-ios:before{content:"\f370"}.fa-apper:before{content:"\f3
71"}.fa-apple:before{content:"\f179"}.fa-apple-alt:before{content:"\f5d1"}.fa-apple-pay:before{content:"\f415"}.fa-archive:before{content:"\f187"}.fa-archway:before{content:"\f557"}.fa-arrow-alt-circle-down:before{content:"\f358"}.fa-arrow-alt-circle-left:before{content:"\f359"}.fa-arrow-alt-circle-right:before{content:"\f35a"}.fa-arrow-alt-circle-up:before{content:"\f35b"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-down:before{content:"\f063"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrows-alt:before{content:"\f0b2"}.fa-arrows-alt-h:before{content:"\f337"}.fa-arrows-alt-v:before{content:"\f338"}.fa-artstation:before{content:"\f77a"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asterisk:before{content:"\f069"}.fa-asymmetrik:before{content:"\f372"}.fa-at:before{content:"\f1fa"}.fa-atlas:before{content:"\f558"}.fa-atlassian:before{content:"\f77b"}.fa-atom:before{content:"\f5d2"}.fa-audible:before{content:"\f373"}.fa-audio-description:before{content:"\f29e"}.fa-autoprefixer:before{content:"\f41c"}.fa-avianex:before{content:"\f374"}.fa-aviato:before{content:"\f421"}.fa-award:before{content:"\f559"}.fa-aws:before{content:"\f375"}.fa-baby:before{content:"\f77c"}.fa-baby-carriage:before{content:"\f77d"}.fa-backspace:before{content:"\f55a"}.fa-backward:before{content:"\f04a"}.fa-bacon:before{content:"\f7e5"}.fa-bahai:before{content:"\f666"}.fa-balance-scale:before{content:"\f24e"}.fa-balance-scale-left:before{content:"\f515"}.fa-balance-scale-right:before{content:"\f516"}.fa-ban:before{content:"\f05e"}.fa-band-aid:before{content:"\f462"}.fa-bandcamp:before{content:"\f2d5"}.fa-barcode:before{content:"\f02a"}.fa-bars:before{content:"\f0c9"}.fa-baseball-ball:before{content:"\f433"}.fa-basketball-ball:before{content:"\f434"}.fa-bath:before{content:
"\f2cd"}.fa-battery-empty:before{content:"\f244"}.fa-battery-full:before{content:"\f240"}.fa-battery-half:before{content:"\f242"}.fa-battery-quarter:before{content:"\f243"}.fa-battery-three-quarters:before{content:"\f241"}.fa-battle-net:before{content:"\f835"}.fa-bed:before{content:"\f236"}.fa-beer:before{content:"\f0fc"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-bell:before{content:"\f0f3"}.fa-bell-slash:before{content:"\f1f6"}.fa-bezier-curve:before{content:"\f55b"}.fa-bible:before{content:"\f647"}.fa-bicycle:before{content:"\f206"}.fa-biking:before{content:"\f84a"}.fa-bimobject:before{content:"\f378"}.fa-binoculars:before{content:"\f1e5"}.fa-biohazard:before{content:"\f780"}.fa-birthday-cake:before{content:"\f1fd"}.fa-bitbucket:before{content:"\f171"}.fa-bitcoin:before{content:"\f379"}.fa-bity:before{content:"\f37a"}.fa-black-tie:before{content:"\f27e"}.fa-blackberry:before{content:"\f37b"}.fa-blender:before{content:"\f517"}.fa-blender-phone:before{content:"\f6b6"}.fa-blind:before{content:"\f29d"}.fa-blog:before{content:"\f781"}.fa-blogger:before{content:"\f37c"}.fa-blogger-b:before{content:"\f37d"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-bold:before{content:"\f032"}.fa-bolt:before{content:"\f0e7"}.fa-bomb:before{content:"\f1e2"}.fa-bone:before{content:"\f5d7"}.fa-bong:before{content:"\f55c"}.fa-book:before{content:"\f02d"}.fa-book-dead:before{content:"\f6b7"}.fa-book-medical:before{content:"\f7e6"}.fa-book-open:before{content:"\f518"}.fa-book-reader:before{content:"\f5da"}.fa-bookmark:before{content:"\f02e"}.fa-bootstrap:before{content:"\f836"}.fa-border-all:before{content:"\f84c"}.fa-border-none:before{content:"\f850"}.fa-border-style:before{content:"\f853"}.fa-bowling-ball:before{content:"\f436"}.fa-box:before{content:"\f466"}.fa-box-open:before{content:"\f49e"}.fa-boxes:before{content:"\f468"}.fa-braille:before{content:"\f2a1"}.fa-brain:before{content:"\f5dc"}.fa-bread-slice:before{c
ontent:"\f7ec"}.fa-briefcase:before{content:"\f0b1"}.fa-briefcase-medical:before{content:"\f469"}.fa-broadcast-tower:before{content:"\f519"}.fa-broom:before{content:"\f51a"}.fa-brush:before{content:"\f55d"}.fa-btc:before{content:"\f15a"}.fa-buffer:before{content:"\f837"}.fa-bug:before{content:"\f188"}.fa-building:before{content:"\f1ad"}.fa-bullhorn:before{content:"\f0a1"}.fa-bullseye:before{content:"\f140"}.fa-burn:before{content:"\f46a"}.fa-buromobelexperte:before{content:"\f37f"}.fa-bus:before{content:"\f207"}.fa-bus-alt:before{content:"\f55e"}.fa-business-time:before{content:"\f64a"}.fa-buy-n-large:before{content:"\f8a6"}.fa-buysellads:before{content:"\f20d"}.fa-calculator:before{content:"\f1ec"}.fa-calendar:before{content:"\f133"}.fa-calendar-alt:before{content:"\f073"}.fa-calendar-check:before{content:"\f274"}.fa-calendar-day:before{content:"\f783"}.fa-calendar-minus:before{content:"\f272"}.fa-calendar-plus:before{content:"\f271"}.fa-calendar-times:before{content:"\f273"}.fa-calendar-week:before{content:"\f784"}.fa-camera:before{content:"\f030"}.fa-camera-retro:before{content:"\f083"}.fa-campground:before{content:"\f6bb"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-candy-cane:before{content:"\f786"}.fa-cannabis:before{content:"\f55f"}.fa-capsules:before{content:"\f46b"}.fa-car:before{content:"\f1b9"}.fa-car-alt:before{content:"\f5de"}.fa-car-battery:before{content:"\f5df"}.fa-car-crash:before{content:"\f5e1"}.fa-car-side:before{content:"\f5e4"}.fa-caravan:before{content:"\f8ff"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-caret-square-down:before{content:"\f150"}.fa-caret-square-left:before{content:"\f191"}.fa-caret-square-right:before{content:"\f152"}.fa-caret-square-up:before{content:"\f151"}.fa-caret-up:before{content:"\f0d8"}.fa-carrot:before{content:"\f787"}.fa-cart-arrow-down:before{content:"\f218"}.fa-cart-plus:before{content:"\f217"}.fa-cash-register:before{content:"\f788"}.
fa-cat:before{content:"\f6be"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-apple-pay:before{content:"\f416"}.fa-cc-diners-club:before{content:"\f24c"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-cc-visa:before{content:"\f1f0"}.fa-centercode:before{content:"\f380"}.fa-centos:before{content:"\f789"}.fa-certificate:before{content:"\f0a3"}.fa-chair:before{content:"\f6c0"}.fa-chalkboard:before{content:"\f51b"}.fa-chalkboard-teacher:before{content:"\f51c"}.fa-charging-station:before{content:"\f5e7"}.fa-chart-area:before{content:"\f1fe"}.fa-chart-bar:before{content:"\f080"}.fa-chart-line:before{content:"\f201"}.fa-chart-pie:before{content:"\f200"}.fa-check:before{content:"\f00c"}.fa-check-circle:before{content:"\f058"}.fa-check-double:before{content:"\f560"}.fa-check-square:before{content:"\f14a"}.fa-cheese:before{content:"\f7ef"}.fa-chess:before{content:"\f439"}.fa-chess-bishop:before{content:"\f43a"}.fa-chess-board:before{content:"\f43c"}.fa-chess-king:before{content:"\f43f"}.fa-chess-knight:before{content:"\f441"}.fa-chess-pawn:before{content:"\f443"}.fa-chess-queen:before{content:"\f445"}.fa-chess-rook:before{content:"\f447"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-down:before{content:"\f078"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-chevron-up:before{content:"\f077"}.fa-child:before{content:"\f1ae"}.fa-chrome:before{content:"\f268"}.fa-chromecast:before{content:"\f838"}.fa-church:before{content:"\f51d"}.fa-circle:before{content:"\f111"}.fa-circle-notch:before{content:"\f1ce"}.fa-city:before{content:"\f64f"}.fa-clinic-medical:before{content:"\f7f2"}.fa-clipboard:before{content:"\f328"}.fa-c
lipboard-check:before{content:"\f46c"}.fa-clipboard-list:before{content:"\f46d"}.fa-clock:before{content:"\f017"}.fa-clone:before{content:"\f24d"}.fa-closed-captioning:before{content:"\f20a"}.fa-cloud:before{content:"\f0c2"}.fa-cloud-download-alt:before{content:"\f381"}.fa-cloud-meatball:before{content:"\f73b"}.fa-cloud-moon:before{content:"\f6c3"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-cloud-rain:before{content:"\f73d"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-cloud-sun:before{content:"\f6c4"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-cloud-upload-alt:before{content:"\f382"}.fa-cloudscale:before{content:"\f383"}.fa-cloudsmith:before{content:"\f384"}.fa-cloudversify:before{content:"\f385"}.fa-cocktail:before{content:"\f561"}.fa-code:before{content:"\f121"}.fa-code-branch:before{content:"\f126"}.fa-codepen:before{content:"\f1cb"}.fa-codiepie:before{content:"\f284"}.fa-coffee:before{content:"\f0f4"}.fa-cog:before{content:"\f013"}.fa-cogs:before{content:"\f085"}.fa-coins:before{content:"\f51e"}.fa-columns:before{content:"\f0db"}.fa-comment:before{content:"\f075"}.fa-comment-alt:before{content:"\f27a"}.fa-comment-dollar:before{content:"\f651"}.fa-comment-dots:before{content:"\f4ad"}.fa-comment-medical:before{content:"\f7f5"}.fa-comment-slash:before{content:"\f4b3"}.fa-comments:before{content:"\f086"}.fa-comments-dollar:before{content:"\f653"}.fa-compact-disc:before{content:"\f51f"}.fa-compass:before{content:"\f14e"}.fa-compress:before{content:"\f066"}.fa-compress-alt:before{content:"\f422"}.fa-compress-arrows-alt:before{content:"\f78c"}.fa-concierge-bell:before{content:"\f562"}.fa-confluence:before{content:"\f78d"}.fa-connectdevelop:before{content:"\f20e"}.fa-contao:before{content:"\f26d"}.fa-cookie:before{content:"\f563"}.fa-cookie-bite:before{content:"\f564"}.fa-copy:before{content:"\f0c5"}.fa-copyright:before{content:"\f1f9"}.fa-cotton-bureau:before{content:"\f89e"}.fa-couch:before{content:"\f4b8"}.fa-cpanel:before{content:"\f388"}.fa-creative-comm
ons:before{content:"\f25e"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-credit-card:before{content:"\f09d"}.fa-critical-role:before{content:"\f6c9"}.fa-crop:before{content:"\f125"}.fa-crop-alt:before{content:"\f565"}.fa-cross:before{content:"\f654"}.fa-crosshairs:before{content:"\f05b"}.fa-crow:before{content:"\f520"}.fa-crown:before{content:"\f521"}.fa-crutch:before{content:"\f7f7"}.fa-css3:before{content:"\f13c"}.fa-css3-alt:before{content:"\f38b"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-cut:before{content:"\f0c4"}.fa-cuttlefish:before{content:"\f38c"}.fa-d-and-d:before{content:"\f38d"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-dailymotion:before{content:"\f952"}.fa-dashcube:before{content:"\f210"}.fa-database:before{content:"\f1c0"}.fa-deaf:before{content:"\f2a4"}.fa-delicious:before{content:"\f1a5"}.fa-democrat:before{content:"\f747"}.fa-deploydog:before{content:"\f38e"}.fa-deskpro:before{content:"\f38f"}.fa-desktop:before{content:"\f108"}.fa-dev:before{content:"\f6cc"}.fa-deviantart:before{content:"\f1bd"}.fa-dharmachakra:before{content:"\f655"}.fa-dhl:before{content:"\f790"}.fa-diagnoses:before{content:"\f470"}.fa-diaspora:before{content:"\f791"}.fa-dice:before{content:"\f522"}.fa-dice-d20:before{content:"\f6cf"}.fa-dice-d6:before{content:"\f6d1"}.fa-dice-five:before{content:"\f523"}.fa-dice-four:before{content:"\f524"}.fa-dice-one:befo
re{content:"\f525"}.fa-dice-six:before{content:"\f526"}.fa-dice-three:before{content:"\f527"}.fa-dice-two:before{content:"\f528"}.fa-digg:before{content:"\f1a6"}.fa-digital-ocean:before{content:"\f391"}.fa-digital-tachograph:before{content:"\f566"}.fa-directions:before{content:"\f5eb"}.fa-discord:before{content:"\f392"}.fa-discourse:before{content:"\f393"}.fa-divide:before{content:"\f529"}.fa-dizzy:before{content:"\f567"}.fa-dna:before{content:"\f471"}.fa-dochub:before{content:"\f394"}.fa-docker:before{content:"\f395"}.fa-dog:before{content:"\f6d3"}.fa-dollar-sign:before{content:"\f155"}.fa-dolly:before{content:"\f472"}.fa-dolly-flatbed:before{content:"\f474"}.fa-donate:before{content:"\f4b9"}.fa-door-closed:before{content:"\f52a"}.fa-door-open:before{content:"\f52b"}.fa-dot-circle:before{content:"\f192"}.fa-dove:before{content:"\f4ba"}.fa-download:before{content:"\f019"}.fa-draft2digital:before{content:"\f396"}.fa-drafting-compass:before{content:"\f568"}.fa-dragon:before{content:"\f6d5"}.fa-draw-polygon:before{content:"\f5ee"}.fa-dribbble:before{content:"\f17d"}.fa-dribbble-square:before{content:"\f397"}.fa-dropbox:before{content:"\f16b"}.fa-drum:before{content:"\f569"}.fa-drum-steelpan:before{content:"\f56a"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-drupal:before{content:"\f1a9"}.fa-dumbbell:before{content:"\f44b"}.fa-dumpster:before{content:"\f793"}.fa-dumpster-fire:before{content:"\f794"}.fa-dungeon:before{content:"\f6d9"}.fa-dyalog:before{content:"\f399"}.fa-earlybirds:before{content:"\f39a"}.fa-ebay:before{content:"\f4f4"}.fa-edge:before{content:"\f282"}.fa-edit:before{content:"\f044"}.fa-egg:before{content:"\f7fb"}.fa-eject:before{content:"\f052"}.fa-elementor:before{content:"\f430"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-ello:before{content:"\f5f1"}.fa-ember:before{content:"\f423"}.fa-empire:before{content:"\f1d1"}.fa-envelope:before{content:"\f0e0"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-text:be
fore{content:"\f658"}.fa-envelope-square:before{content:"\f199"}.fa-envira:before{content:"\f299"}.fa-equals:before{content:"\f52c"}.fa-eraser:before{content:"\f12d"}.fa-erlang:before{content:"\f39d"}.fa-ethereum:before{content:"\f42e"}.fa-ethernet:before{content:"\f796"}.fa-etsy:before{content:"\f2d7"}.fa-euro-sign:before{content:"\f153"}.fa-evernote:before{content:"\f839"}.fa-exchange-alt:before{content:"\f362"}.fa-exclamation:before{content:"\f12a"}.fa-exclamation-circle:before{content:"\f06a"}.fa-exclamation-triangle:before{content:"\f071"}.fa-expand:before{content:"\f065"}.fa-expand-alt:before{content:"\f424"}.fa-expand-arrows-alt:before{content:"\f31e"}.fa-expeditedssl:before{content:"\f23e"}.fa-external-link-alt:before{content:"\f35d"}.fa-external-link-square-alt:before{content:"\f360"}.fa-eye:before{content:"\f06e"}.fa-eye-dropper:before{content:"\f1fb"}.fa-eye-slash:before{content:"\f070"}.fa-facebook:before{content:"\f09a"}.fa-facebook-f:before{content:"\f39e"}.fa-facebook-messenger:before{content:"\f39f"}.fa-facebook-square:before{content:"\f082"}.fa-fan:before{content:"\f863"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-fast-backward:before{content:"\f049"}.fa-fast-forward:before{content:"\f050"}.fa-fax:before{content:"\f1ac"}.fa-feather:before{content:"\f52d"}.fa-feather-alt:before{content:"\f56b"}.fa-fedex:before{content:"\f797"}.fa-fedora:before{content:"\f798"}.fa-female:before{content:"\f182"}.fa-fighter-jet:before{content:"\f0fb"}.fa-figma:before{content:"\f799"}.fa-file:before{content:"\f15b"}.fa-file-alt:before{content:"\f15c"}.fa-file-archive:before{content:"\f1c6"}.fa-file-audio:before{content:"\f1c7"}.fa-file-code:before{content:"\f1c9"}.fa-file-contract:before{content:"\f56c"}.fa-file-csv:before{content:"\f6dd"}.fa-file-download:before{content:"\f56d"}.fa-file-excel:before{content:"\f1c3"}.fa-file-export:before{content:"\f56e"}.fa-file-image:before{content:"\f1c5"}.fa-file-import:before{content:"\f56f"}.fa-file-invoice:before{content:"
\f570"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-file-medical:before{content:"\f477"}.fa-file-medical-alt:before{content:"\f478"}.fa-file-pdf:before{content:"\f1c1"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-file-prescription:before{content:"\f572"}.fa-file-signature:before{content:"\f573"}.fa-file-upload:before{content:"\f574"}.fa-file-video:before{content:"\f1c8"}.fa-file-word:before{content:"\f1c2"}.fa-fill:before{content:"\f575"}.fa-fill-drip:before{content:"\f576"}.fa-film:before{content:"\f008"}.fa-filter:before{content:"\f0b0"}.fa-fingerprint:before{content:"\f577"}.fa-fire:before{content:"\f06d"}.fa-fire-alt:before{content:"\f7e4"}.fa-fire-extinguisher:before{content:"\f134"}.fa-firefox:before{content:"\f269"}.fa-firefox-browser:before{content:"\f907"}.fa-first-aid:before{content:"\f479"}.fa-first-order:before{content:"\f2b0"}.fa-first-order-alt:before{content:"\f50a"}.fa-firstdraft:before{content:"\f3a1"}.fa-fish:before{content:"\f578"}.fa-fist-raised:before{content:"\f6de"}.fa-flag:before{content:"\f024"}.fa-flag-checkered:before{content:"\f11e"}.fa-flag-usa:before{content:"\f74d"}.fa-flask:before{content:"\f0c3"}.fa-flickr:before{content:"\f16e"}.fa-flipboard:before{content:"\f44d"}.fa-flushed:before{content:"\f579"}.fa-fly:before{content:"\f417"}.fa-folder:before{content:"\f07b"}.fa-folder-minus:before{content:"\f65d"}.fa-folder-open:before{content:"\f07c"}.fa-folder-plus:before{content:"\f65e"}.fa-font:before{content:"\f031"}.fa-font-awesome:before{content:"\f2b4"}.fa-font-awesome-alt:before{content:"\f35c"}.fa-font-awesome-flag:before{content:"\f425"}.fa-font-awesome-logo-full:before{content:"\f4e6"}.fa-fonticons:before{content:"\f280"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-football-ball:before{content:"\f44e"}.fa-fort-awesome:before{content:"\f286"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-forumbee:before{content:"\f211"}.fa-forward:before{content:"\f04e"}.fa-foursquare:before{content:"\f180"}.fa-free-code-camp:before{content:"
\f2c5"}.fa-freebsd:before{content:"\f3a4"}.fa-frog:before{content:"\f52e"}.fa-frown:before{content:"\f119"}.fa-frown-open:before{content:"\f57a"}.fa-fulcrum:before{content:"\f50b"}.fa-funnel-dollar:before{content:"\f662"}.fa-futbol:before{content:"\f1e3"}.fa-galactic-republic:before{content:"\f50c"}.fa-galactic-senate:before{content:"\f50d"}.fa-gamepad:before{content:"\f11b"}.fa-gas-pump:before{content:"\f52f"}.fa-gavel:before{content:"\f0e3"}.fa-gem:before{content:"\f3a5"}.fa-genderless:before{content:"\f22d"}.fa-get-pocket:before{content:"\f265"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-ghost:before{content:"\f6e2"}.fa-gift:before{content:"\f06b"}.fa-gifts:before{content:"\f79c"}.fa-git:before{content:"\f1d3"}.fa-git-alt:before{content:"\f841"}.fa-git-square:before{content:"\f1d2"}.fa-github:before{content:"\f09b"}.fa-github-alt:before{content:"\f113"}.fa-github-square:before{content:"\f092"}.fa-gitkraken:before{content:"\f3a6"}.fa-gitlab:before{content:"\f296"}.fa-gitter:before{content:"\f426"}.fa-glass-cheers:before{content:"\f79f"}.fa-glass-martini:before{content:"\f000"}.fa-glass-martini-alt:before{content:"\f57b"}.fa-glass-whiskey:before{content:"\f7a0"}.fa-glasses:before{content:"\f530"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-globe:before{content:"\f0ac"}.fa-globe-africa:before{content:"\f57c"}.fa-globe-americas:before{content:"\f57d"}.fa-globe-asia:before{content:"\f57e"}.fa-globe-europe:before{content:"\f7a2"}.fa-gofore:before{content:"\f3a7"}.fa-golf-ball:before{content:"\f450"}.fa-goodreads:before{content:"\f3a8"}.fa-goodreads-g:before{content:"\f3a9"}.fa-google:before{content:"\f1a0"}.fa-google-drive:before{content:"\f3aa"}.fa-google-play:before{content:"\f3ab"}.fa-google-plus:before{content:"\f2b3"}.fa-google-plus-g:before{content:"\f0d5"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-wallet:before{content:"\f1ee"}.fa-gopuram:before{content:"\f664"}.fa-graduation-cap:before{content:
"\f19d"}.fa-gratipay:before{content:"\f184"}.fa-grav:before{content:"\f2d6"}.fa-greater-than:before{content:"\f531"}.fa-greater-than-equal:before{content:"\f532"}.fa-grimace:before{content:"\f57f"}.fa-grin:before{content:"\f580"}.fa-grin-alt:before{content:"\f581"}.fa-grin-beam:before{content:"\f582"}.fa-grin-beam-sweat:before{content:"\f583"}.fa-grin-hearts:before{content:"\f584"}.fa-grin-squint:before{content:"\f585"}.fa-grin-squint-tears:before{content:"\f586"}.fa-grin-stars:before{content:"\f587"}.fa-grin-tears:before{content:"\f588"}.fa-grin-tongue:before{content:"\f589"}.fa-grin-tongue-squint:before{content:"\f58a"}.fa-grin-tongue-wink:before{content:"\f58b"}.fa-grin-wink:before{content:"\f58c"}.fa-grip-horizontal:before{content:"\f58d"}.fa-grip-lines:before{content:"\f7a4"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-grip-vertical:before{content:"\f58e"}.fa-gripfire:before{content:"\f3ac"}.fa-grunt:before{content:"\f3ad"}.fa-guitar:before{content:"\f7a6"}.fa-gulp:before{content:"\f3ae"}.fa-h-square:before{content:"\f0fd"}.fa-hacker-news:before{content:"\f1d4"}.fa-hacker-news-square:before{content:"\f3af"}.fa-hackerrank:before{content:"\f5f7"}.fa-hamburger:before{content:"\f805"}.fa-hammer:before{content:"\f6e3"}.fa-hamsa:before{content:"\f665"}.fa-hand-holding:before{content:"\f4bd"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-hand-holding-usd:before{content:"\f4c0"}.fa-hand-lizard:before{content:"\f258"}.fa-hand-middle-finger:before{content:"\f806"}.fa-hand-paper:before{content:"\f256"}.fa-hand-peace:before{content:"\f25b"}.fa-hand-point-down:before{content:"\f0a7"}.fa-hand-point-left:before{content:"\f0a5"}.fa-hand-point-right:before{content:"\f0a4"}.fa-hand-point-up:before{content:"\f0a6"}.fa-hand-pointer:before{content:"\f25a"}.fa-hand-rock:before{content:"\f255"}.fa-hand-scissors:before{content:"\f257"}.fa-hand-spock:before{content:"\f259"}.fa-hands:before{content:"\f4c2"}.fa-hands-helping:before{content:"\f4c4"}.fa-handshake:before{content:"\
f2b5"}.fa-hanukiah:before{content:"\f6e6"}.fa-hard-hat:before{content:"\f807"}.fa-hashtag:before{content:"\f292"}.fa-hat-cowboy:before{content:"\f8c0"}.fa-hat-cowboy-side:before{content:"\f8c1"}.fa-hat-wizard:before{content:"\f6e8"}.fa-hdd:before{content:"\f0a0"}.fa-heading:before{content:"\f1dc"}.fa-headphones:before{content:"\f025"}.fa-headphones-alt:before{content:"\f58f"}.fa-headset:before{content:"\f590"}.fa-heart:before{content:"\f004"}.fa-heart-broken:before{content:"\f7a9"}.fa-heartbeat:before{content:"\f21e"}.fa-helicopter:before{content:"\f533"}.fa-highlighter:before{content:"\f591"}.fa-hiking:before{content:"\f6ec"}.fa-hippo:before{content:"\f6ed"}.fa-hips:before{content:"\f452"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-history:before{content:"\f1da"}.fa-hockey-puck:before{content:"\f453"}.fa-holly-berry:before{content:"\f7aa"}.fa-home:before{content:"\f015"}.fa-hooli:before{content:"\f427"}.fa-hornbill:before{content:"\f592"}.fa-horse:before{content:"\f6f0"}.fa-horse-head:before{content:"\f7ab"}.fa-hospital:before{content:"\f0f8"}.fa-hospital-alt:before{content:"\f47d"}.fa-hospital-symbol:before{content:"\f47e"}.fa-hot-tub:before{content:"\f593"}.fa-hotdog:before{content:"\f80f"}.fa-hotel:before{content:"\f594"}.fa-hotjar:before{content:"\f3b1"}.fa-hourglass:before{content:"\f254"}.fa-hourglass-end:before{content:"\f253"}.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-start:before{content:"\f251"}.fa-house-damage:before{content:"\f6f1"}.fa-houzz:before{content:"\f27c"}.fa-hryvnia:before{content:"\f6f2"}.fa-html5:before{content:"\f13b"}.fa-hubspot:before{content:"\f3b2"}.fa-i-cursor:before{content:"\f246"}.fa-ice-cream:before{content:"\f810"}.fa-icicles:before{content:"\f7ad"}.fa-icons:before{content:"\f86d"}.fa-id-badge:before{content:"\f2c1"}.fa-id-card:before{content:"\f2c2"}.fa-id-card-alt:before{content:"\f47f"}.fa-ideal:before{content:"\f913"}.fa-igloo:before{content:"\f7ae"}.fa-image:before{content:"\f03e"}.fa-images:before{content:"\f30
2"}.fa-imdb:before{content:"\f2d8"}.fa-inbox:before{content:"\f01c"}.fa-indent:before{content:"\f03c"}.fa-industry:before{content:"\f275"}.fa-infinity:before{content:"\f534"}.fa-info:before{content:"\f129"}.fa-info-circle:before{content:"\f05a"}.fa-instagram:before{content:"\f16d"}.fa-instagram-square:before{content:"\f955"}.fa-intercom:before{content:"\f7af"}.fa-internet-explorer:before{content:"\f26b"}.fa-invision:before{content:"\f7b0"}.fa-ioxhost:before{content:"\f208"}.fa-italic:before{content:"\f033"}.fa-itch-io:before{content:"\f83a"}.fa-itunes:before{content:"\f3b4"}.fa-itunes-note:before{content:"\f3b5"}.fa-java:before{content:"\f4e4"}.fa-jedi:before{content:"\f669"}.fa-jedi-order:before{content:"\f50e"}.fa-jenkins:before{content:"\f3b6"}.fa-jira:before{content:"\f7b1"}.fa-joget:before{content:"\f3b7"}.fa-joint:before{content:"\f595"}.fa-joomla:before{content:"\f1aa"}.fa-journal-whills:before{content:"\f66a"}.fa-js:before{content:"\f3b8"}.fa-js-square:before{content:"\f3b9"}.fa-jsfiddle:before{content:"\f1cc"}.fa-kaaba:before{content:"\f66b"}.fa-kaggle:before{content:"\f5fa"}.fa-key:before{content:"\f084"}.fa-keybase:before{content:"\f4f5"}.fa-keyboard:before{content:"\f11c"}.fa-keycdn:before{content:"\f3ba"}.fa-khanda:before{content:"\f66d"}.fa-kickstarter:before{content:"\f3bb"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-kiss:before{content:"\f596"}.fa-kiss-beam:before{content:"\f597"}.fa-kiss-wink-heart:before{content:"\f598"}.fa-kiwi-bird:before{content:"\f535"}.fa-korvue:before{content:"\f42f"}.fa-landmark:before{content:"\f66f"}.fa-language:before{content:"\f1ab"}.fa-laptop:before{content:"\f109"}.fa-laptop-code:before{content:"\f5fc"}.fa-laptop-medical:before{content:"\f812"}.fa-laravel:before{content:"\f3bd"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-laugh:before{content:"\f599"}.fa-laugh-beam:before{content:"\f59a"}.fa-laugh-squint:before{content:"\f59b"}.fa-laugh-wink:before{content:"\f59c"}.fa-layer-group:befo
re{content:"\f5fd"}.fa-leaf:before{content:"\f06c"}.fa-leanpub:before{content:"\f212"}.fa-lemon:before{content:"\f094"}.fa-less:before{content:"\f41d"}.fa-less-than:before{content:"\f536"}.fa-less-than-equal:before{content:"\f537"}.fa-level-down-alt:before{content:"\f3be"}.fa-level-up-alt:before{content:"\f3bf"}.fa-life-ring:before{content:"\f1cd"}.fa-lightbulb:before{content:"\f0eb"}.fa-line:before{content:"\f3c0"}.fa-link:before{content:"\f0c1"}.fa-linkedin:before{content:"\f08c"}.fa-linkedin-in:before{content:"\f0e1"}.fa-linode:before{content:"\f2b8"}.fa-linux:before{content:"\f17c"}.fa-lira-sign:before{content:"\f195"}.fa-list:before{content:"\f03a"}.fa-list-alt:before{content:"\f022"}.fa-list-ol:before{content:"\f0cb"}.fa-list-ul:before{content:"\f0ca"}.fa-location-arrow:before{content:"\f124"}.fa-lock:before{content:"\f023"}.fa-lock-open:before{content:"\f3c1"}.fa-long-arrow-alt-down:before{content:"\f309"}.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-long-arrow-alt-right:before{content:"\f30b"}.fa-long-arrow-alt-up:before{content:"\f30c"}.fa-low-vision:before{content:"\f2a8"}.fa-luggage-cart:before{content:"\f59d"}.fa-lyft:before{content:"\f3c3"}.fa-magento:before{content:"\f3c4"}.fa-magic:before{content:"\f0d0"}.fa-magnet:before{content:"\f076"}.fa-mail-bulk:before{content:"\f674"}.fa-mailchimp:before{content:"\f59e"}.fa-male:before{content:"\f183"}.fa-mandalorian:before{content:"\f50f"}.fa-map:before{content:"\f279"}.fa-map-marked:before{content:"\f59f"}.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-marker:before{content:"\f041"}.fa-map-marker-alt:before{content:"\f3c5"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-markdown:before{content:"\f60f"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mars-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mask:before{content:"\f6fa"}.fa-mastodon:before{
content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-mdb:before{content:"\f8ca"}.fa-medal:before{content:"\f5a2"}.fa-medapps:before{content:"\f3c6"}.fa-medium:before{content:"\f23a"}.fa-medium-m:before{content:"\f3c7"}.fa-medkit:before{content:"\f0fa"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-meh:before{content:"\f11a"}.fa-meh-blank:before{content:"\f5a4"}.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-memory:before{content:"\f538"}.fa-mendeley:before{content:"\f7b3"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-meteor:before{content:"\f753"}.fa-microblog:before{content:"\f91a"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before{content:"\f3c9"}.fa-microphone-alt-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa-microsoft:before{content:"\f3ca"}.fa-minus:before{content:"\f068"}.fa-minus-circle:before{content:"\f056"}.fa-minus-square:before{content:"\f146"}.fa-mitten:before{content:"\f7b5"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mixer:before{content:"\f956"}.fa-mizuni:before{content:"\f3cc"}.fa-mobile:before{content:"\f10b"}.fa-mobile-alt:before{content:"\f3cd"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before{content:"\f53d"}.fa-monument:before{content:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-motorcycle:before{content:"\f21c"}.fa-mountain:before{content:"\f6fc"}.fa-mouse:before{content:"\f8cc"}.fa-mouse-pointer:before{content:"\f245"}.fa-mug-hot:before{content:"\f7b6"}.fa-music:before{content:"\f001"}.fa-nap
ster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-nimblr:before{content:"\f5a8"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-not-equal:before{content:"\f53e"}.fa-notes-medical:before{content:"\f481"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-oil-can:before{content:"\f613"}.fa-old-republic:before{content:"\f510"}.fa-om:before{content:"\f679"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-orcid:before{content:"\f8d2"}.fa-osi:before{content:"\f41a"}.fa-otter:before{content:"\f700"}.fa-outdent:before{content:"\f03b"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-pager:before{content:"\f815"}.fa-paint-brush:before{content:"\f1fc"}.fa-paint-roller:before{content:"\f5aa"}.fa-palette:before{content:"\f53f"}.fa-palfed:before{content:"\f3d8"}.fa-pallet:before{content:"\f482"}.fa-paper-plane:before{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-parking:before{content:"\f540"}.fa-passport:before{content:"\f5ab"}.fa-pastafarianism:before{content:"\f67b"}.fa-paste:before{content:"\f0ea"}.fa-patreon:before{content:"\f3d9"}.fa-pause:before{content:"\f04c"}.fa-pause-circle:before{content:"\f28b"}.fa-paw:before{content:"\f1b0"}.fa-paypal:before{content:"\f1ed"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-square:before{content:"\f14b"}.fa-pencil-alt:before{content
:"\f303"}.fa-pencil-ruler:before{content:"\f5ae"}.fa-penny-arcade:before{content:"\f704"}.fa-people-carry:before{content:"\f4ce"}.fa-pepper-hot:before{content:"\f816"}.fa-percent:before{content:"\f295"}.fa-percentage:before{content:"\f541"}.fa-periscope:before{content:"\f3da"}.fa-person-booth:before{content:"\f756"}.fa-phabricator:before{content:"\f3db"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-phone:before{content:"\f095"}.fa-phone-alt:before{content:"\f879"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-square:before{content:"\f098"}.fa-phone-square-alt:before{content:"\f87b"}.fa-phone-volume:before{content:"\f2a0"}.fa-photo-video:before{content:"\f87c"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{content:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-square:before{content:"\f91e"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pinterest-square:before{content:"\f0d3"}.fa-pizza-slice:before{content:"\f818"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-departure:before{content:"\f5b0"}.fa-play:before{content:"\f04b"}.fa-play-circle:before{content:"\f144"}.fa-playstation:before{content:"\f3df"}.fa-plug:before{content:"\f1e6"}.fa-plus:before{content:"\f067"}.fa-plus-circle:before{content:"\f055"}.fa-plus-square:before{content:"\f0fe"}.fa-podcast:before{content:"\f2ce"}.fa-poll:before{content:"\f681"}.fa-poll-h:before{content:"\f682"}.fa-poo:before{content:"\f2fe"}.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-portrait:before{content:"\f3e0"}.fa-pound-sign:before{content:"\f154"}.fa-power-off:before{content:"\f011"}.fa-pray:before{content:"\f683"}.fa-praying-hands:before{content:"\f684"}.fa-prescription
:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-procedures:before{content:"\f487"}.fa-product-hunt:before{content:"\f288"}.fa-project-diagram:before{content:"\f542"}.fa-pushed:before{content:"\f3e1"}.fa-puzzle-piece:before{content:"\f12e"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\f128"}.fa-question-circle:before{content:"\f059"}.fa-quidditch:before{content:"\f458"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-quran:before{content:"\f687"}.fa-r-project:before{content:"\f4f7"}.fa-radiation:before{content:"\f7b9"}.fa-radiation-alt:before{content:"\f7ba"}.fa-rainbow:before{content:"\f75b"}.fa-random:before{content:"\f074"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-receipt:before{content:"\f543"}.fa-record-vinyl:before{content:"\f8d9"}.fa-recycle:before{content:"\f1b8"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-reddit-square:before{content:"\f1a2"}.fa-redhat:before{content:"\f7bc"}.fa-redo:before{content:"\f01e"}.fa-redo-alt:before{content:"\f2f9"}.fa-registered:before{content:"\f25d"}.fa-remove-format:before{content:"\f87d"}.fa-renren:before{content:"\f18b"}.fa-reply:before{content:"\f3e5"}.fa-reply-all:before{content:"\f122"}.fa-replyd:before{content:"\f3e6"}.fa-republican:before{content:"\f75e"}.fa-researchgate:before{content:"\f4f8"}.fa-resolving:before{content:"\f3e7"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-rev:before{content:"\f5b2"}.fa-ribbon:before{content:"\f4d6"}.fa-ring:before{content:"\
f70b"}.fa-road:before{content:"\f018"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-route:before{content:"\f4d7"}.fa-rss:before{content:"\f09e"}.fa-rss-square:before{content:"\f143"}.fa-ruble-sign:before{content:"\f158"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-running:before{content:"\f70c"}.fa-rupee-sign:before{content:"\f156"}.fa-sad-cry:before{content:"\f5b3"}.fa-sad-tear:before{content:"\f5b4"}.fa-safari:before{content:"\f267"}.fa-salesforce:before{content:"\f83b"}.fa-sass:before{content:"\f41e"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-save:before{content:"\f0c7"}.fa-schlix:before{content:"\f3ea"}.fa-school:before{content:"\f549"}.fa-screwdriver:before{content:"\f54a"}.fa-scribd:before{content:"\f28a"}.fa-scroll:before{content:"\f70e"}.fa-sd-card:before{content:"\f7c2"}.fa-search:before{content:"\f002"}.fa-search-dollar:before{content:"\f688"}.fa-search-location:before{content:"\f689"}.fa-search-minus:before{content:"\f010"}.fa-search-plus:before{content:"\f00e"}.fa-searchengin:before{content:"\f3eb"}.fa-seedling:before{content:"\f4d8"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-server:before{content:"\f233"}.fa-servicestack:before{content:"\f3ec"}.fa-shapes:before{content:"\f61f"}.fa-share:before{content:"\f064"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-share-square:before{content:"\f14d"}.fa-shekel-sign:before{content:"\f20b"}.fa-shield-alt:before{content:"\f3ed"}.fa-ship:before{content:"\f21a"}.fa-shipping-fast:before{content:"\f48b"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shoe-prints:before{content:"\f54b"}.fa-shopify:before{content:"\f957"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.
fa-shopping-cart:before{content:"\f07a"}.fa-shopware:before{content:"\f5b5"}.fa-shower:before{content:"\f2cc"}.fa-shuttle-van:before{content:"\f5b6"}.fa-sign:before{content:"\f4d9"}.fa-sign-in-alt:before{content:"\f2f6"}.fa-sign-language:before{content:"\f2a7"}.fa-sign-out-alt:before{content:"\f2f5"}.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-sim-card:before{content:"\f7c4"}.fa-simplybuilt:before{content:"\f215"}.fa-sistrix:before{content:"\f3ee"}.fa-sitemap:before{content:"\f0e8"}.fa-sith:before{content:"\f512"}.fa-skating:before{content:"\f7c5"}.fa-sketch:before{content:"\f7c6"}.fa-skiing:before{content:"\f7c9"}.fa-skiing-nordic:before{content:"\f7ca"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack:before{content:"\f198"}.fa-slack-hash:before{content:"\f3ef"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before{content:"\f1de"}.fa-slideshare:before{content:"\f1e7"}.fa-smile:before{content:"\f118"}.fa-smile-beam:before{content:"\f5b8"}.fa-smile-wink:before{content:"\f4da"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-smoking-ban:before{content:"\f54d"}.fa-sms:before{content:"\f7cd"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-snowboarding:before{content:"\f7ce"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before{content:"\f0dc"}.fa-sort-alpha-down:before{content:"\f15d"}.fa-sort-alpha-down-alt:before{content:"\f881"}.fa-sort-alpha-up:before{content:"\f15e"}.fa-sort-alpha-up-alt:before{content:"\f882"}.fa-sort-amount-down:before{content:"\f160"}.fa-sort-amount-down-alt:before{content:"\f884"}.fa-sort-amount-up:before{content:"\f161"}.fa-sort-amount-up-
alt:before{content:"\f885"}.fa-sort-down:before{content:"\f0dd"}.fa-sort-numeric-down:before{content:"\f162"}.fa-sort-numeric-down-alt:before{content:"\f886"}.fa-sort-numeric-up:before{content:"\f163"}.fa-sort-numeric-up-alt:before{content:"\f887"}.fa-sort-up:before{content:"\f0de"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-spa:before{content:"\f5bb"}.fa-space-shuttle:before{content:"\f197"}.fa-speakap:before{content:"\f3f3"}.fa-speaker-deck:before{content:"\f83c"}.fa-spell-check:before{content:"\f891"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spotify:before{content:"\f1bc"}.fa-spray-can:before{content:"\f5bd"}.fa-square:before{content:"\f0c8"}.fa-square-full:before{content:"\f45c"}.fa-square-root-alt:before{content:"\f698"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stackpath:before{content:"\f842"}.fa-stamp:before{content:"\f5bf"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-steam-symbol:before{content:"\f3f6"}.fa-step-backward:before{content:"\f048"}.fa-step-forward:before{content:"\f051"}.fa-stethoscope:before{content:"\f0f1"}.fa-sticker-mule:before{content:"\f3f7"}.fa-sticky-note:before{content:"\f249"}.fa-stop:before{content:"\f04d"}.fa-stop-circle:before{content:"\f28d"}.fa-stopwatch:before{content:"\f2f2"}.fa-store:before{content:"\f54e"}.fa-store-alt:before{content:"\f54f"}.fa-strava:before{content:"\f428"}.fa-stream:before{content:"\f550"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{cont
ent:"\f42a"}.fa-stroopwafel:before{content:"\f551"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-subscript:before{content:"\f12c"}.fa-subway:before{content:"\f239"}.fa-suitcase:before{content:"\f0f2"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{content:"\f185"}.fa-superpowers:before{content:"\f2dd"}.fa-superscript:before{content:"\f12b"}.fa-supple:before{content:"\f3f9"}.fa-surprise:before{content:"\f5c2"}.fa-suse:before{content:"\f7d6"}.fa-swatchbook:before{content:"\f5c3"}.fa-swift:before{content:"\f8e1"}.fa-swimmer:before{content:"\f5c4"}.fa-swimming-pool:before{content:"\f5c5"}.fa-symfony:before{content:"\f83d"}.fa-synagogue:before{content:"\f69b"}.fa-sync:before{content:"\f021"}.fa-sync-alt:before{content:"\f2f1"}.fa-syringe:before{content:"\f48e"}.fa-table:before{content:"\f0ce"}.fa-table-tennis:before{content:"\f45d"}.fa-tablet:before{content:"\f10a"}.fa-tablet-alt:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-tachometer-alt:before{content:"\f3fd"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tasks:before{content:"\f0ae"}.fa-taxi:before{content:"\f1ba"}.fa-teamspeak:before{content:"\f4f9"}.fa-teeth:before{content:"\f62e"}.fa-teeth-open:before{content:"\f62f"}.fa-telegram:before{content:"\f2c6"}.fa-telegram-plane:before{content:"\f3fe"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-tenge:before{content:"\f7d7"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-th:before{content:"\f00a"}.fa-th-large:before{content:"\f009"}.fa-th-list:before{content:"\f00b"}.fa-the-red-yeti:before{content:"\f69d"}.fa-theater-masks:before{content:"\f630"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-thermometer:before{content:"\f491"}.fa-t
hermometer-empty:before{content:"\f2cb"}.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-think-peaks:before{content:"\f731"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbtack:before{content:"\f08d"}.fa-ticket-alt:before{content:"\f3ff"}.fa-times:before{content:"\f00d"}.fa-times-circle:before{content:"\f057"}.fa-tint:before{content:"\f043"}.fa-tint-slash:before{content:"\f5c7"}.fa-tired:before{content:"\f5c8"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toolbox:before{content:"\f552"}.fa-tools:before{content:"\f7d9"}.fa-tooth:before{content:"\f5c9"}.fa-torah:before{content:"\f6a0"}.fa-torii-gate:before{content:"\f6a1"}.fa-tractor:before{content:"\f722"}.fa-trade-federation:before{content:"\f513"}.fa-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-trailer:before{content:"\f941"}.fa-train:before{content:"\f238"}.fa-tram:before{content:"\f7da"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-alt:before{content:"\f2ed"}.fa-trash-restore:before{content:"\f829"}.fa-trash-restore-alt:before{content:"\f82a"}.fa-tree:before{content:"\f1bb"}.fa-trello:before{content:"\f181"}.fa-tripadvisor:before{content:"\f262"}.fa-trophy:before{content:"\f091"}.fa-truck:before{content:"\f0d1"}.fa-truck-loading:before{content:"\f4de"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-tshirt:before{content:"\f553"}.fa-tty:before{content:"\f1e4"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-tv:before{content:"\f26c"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-twitter-squa
re:before{content:"\f081"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}.fa-ubuntu:before{content:"\f7df"}.fa-uikit:before{content:"\f403"}.fa-umbraco:before{content:"\f8e8"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-underline:before{content:"\f0cd"}.fa-undo:before{content:"\f0e2"}.fa-undo-alt:before{content:"\f2ea"}.fa-uniregistry:before{content:"\f404"}.fa-unity:before{content:"\f949"}.fa-universal-access:before{content:"\f29a"}.fa-university:before{content:"\f19c"}.fa-unlink:before{content:"\f127"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before{content:"\f13e"}.fa-untappd:before{content:"\f405"}.fa-upload:before{content:"\f093"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f287"}.fa-user:before{content:"\f007"}.fa-user-alt:before{content:"\f406"}.fa-user-alt-slash:before{content:"\f4fa"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-clock:before{content:"\f4fd"}.fa-user-cog:before{content:"\f4fe"}.fa-user-edit:before{content:"\f4ff"}.fa-user-friends:before{content:"\f500"}.fa-user-graduate:before{content:"\f501"}.fa-user-injured:before{content:"\f728"}.fa-user-lock:before{content:"\f502"}.fa-user-md:before{content:"\f0f0"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-nurse:before{content:"\f82f"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-cog:before{content:"\f509"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-utensil-spoon:before{content:"\f2e5"}.fa-utensils:before{content:"\f2e7"}.fa-vaadin:before{content:"\f408"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}
.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-vial:before{content:"\f492"}.fa-vials:before{content:"\f493"}.fa-viber:before{content:"\f409"}.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:before{content:"\f6a7"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-square:before{content:"\f194"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-voicemail:before{content:"\f897"}.fa-volleyball-ball:before{content:"\f45f"}.fa-volume-down:before{content:"\f027"}.fa-volume-mute:before{content:"\f6a9"}.fa-volume-off:before{content:"\f026"}.fa-volume-up:before{content:"\f028"}.fa-vote-yea:before{content:"\f772"}.fa-vr-cardboard:before{content:"\f729"}.fa-vuejs:before{content:"\f41f"}.fa-walking:before{content:"\f554"}.fa-wallet:before{content:"\f555"}.fa-warehouse:before{content:"\f494"}.fa-water:before{content:"\f773"}.fa-wave-square:before{content:"\f83e"}.fa-waze:before{content:"\f83f"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weight:before{content:"\f496"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content:"\f232"}.fa-whatsapp-square:before{content:"\f40c"}.fa-wheelchair:before{content:"\f193"}.fa-whmcs:before{content:"\f40d"}.fa-wifi:before{content:"\f1eb"}.fa-wikipedia-w:before{content:"\f266"}.fa-wind:before{content:"\f72e"}.fa-window-close:before{content:"\f410"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-windows:before{content:"\f17a"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-wine-glass-alt:before{content:"\f5ce"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wolf-pack-battalion:
before{content:"\f514"}.fa-won-sign:before{content:"\f159"}.fa-wordpress:before{content:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-wpressr:before{content:"\f3e4"}.fa-wrench:before{content:"\f0ad"}.fa-x-ray:before{content:"\f497"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yammer:before{content:"\f840"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yen-sign:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-youtube-square:before{content:"\f431"}.fa-zhihu:before{content:"\f63f"}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:400;font-display:auto;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:400;font-display:auto;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) 
format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.fab,.far{font-weight:400}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;font-display:auto;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.far,.fas{font-family:"Font Awesome 5 Free"}.fa,.fas{font-weight:900} \ No newline at end of file diff --git a/blog/themes/harbor/static/fontawesome/webfonts/fa-brands-400.woff b/blog/themes/harbor/static/fontawesome/webfonts/fa-brands-400.woff new file mode 100644 index 0000000..a43870c Binary files /dev/null and b/blog/themes/harbor/static/fontawesome/webfonts/fa-brands-400.woff differ diff --git a/blog/themes/harbor/static/fontawesome/webfonts/fa-brands-400.woff2 b/blog/themes/harbor/static/fontawesome/webfonts/fa-brands-400.woff2 new file mode 100644 index 0000000..3c5189d Binary files /dev/null and b/blog/themes/harbor/static/fontawesome/webfonts/fa-brands-400.woff2 differ diff --git a/blog/themes/harbor/static/fontawesome/webfonts/fa-regular-400.woff b/blog/themes/harbor/static/fontawesome/webfonts/fa-regular-400.woff new file mode 100644 index 0000000..f17953a Binary files /dev/null and b/blog/themes/harbor/static/fontawesome/webfonts/fa-regular-400.woff differ diff --git a/blog/themes/harbor/static/fontawesome/webfonts/fa-regular-400.woff2 b/blog/themes/harbor/static/fontawesome/webfonts/fa-regular-400.woff2 new file mode 100644 index 0000000..1f796c7 Binary files /dev/null and b/blog/themes/harbor/static/fontawesome/webfonts/fa-regular-400.woff2 differ diff --git a/blog/themes/harbor/static/fontawesome/webfonts/fa-solid-900.woff b/blog/themes/harbor/static/fontawesome/webfonts/fa-solid-900.woff new file mode 100644 index 
0000000..3c9ef93 Binary files /dev/null and b/blog/themes/harbor/static/fontawesome/webfonts/fa-solid-900.woff differ diff --git a/blog/themes/harbor/static/fontawesome/webfonts/fa-solid-900.woff2 b/blog/themes/harbor/static/fontawesome/webfonts/fa-solid-900.woff2 new file mode 100644 index 0000000..ba7507b Binary files /dev/null and b/blog/themes/harbor/static/fontawesome/webfonts/fa-solid-900.woff2 differ diff --git a/blog/themes/harbor/static/fonts/noto-sans-jp-v25-japanese_latin-regular.woff b/blog/themes/harbor/static/fonts/noto-sans-jp-v25-japanese_latin-regular.woff new file mode 100644 index 0000000..3112566 Binary files /dev/null and b/blog/themes/harbor/static/fonts/noto-sans-jp-v25-japanese_latin-regular.woff differ diff --git a/blog/themes/harbor/static/fonts/noto-sans-jp-v25-japanese_latin-regular.woff2 b/blog/themes/harbor/static/fonts/noto-sans-jp-v25-japanese_latin-regular.woff2 new file mode 100644 index 0000000..b648b37 Binary files /dev/null and b/blog/themes/harbor/static/fonts/noto-sans-jp-v25-japanese_latin-regular.woff2 differ diff --git a/blog/themes/harbor/static/fonts/roboto-v20-latin-regular.woff b/blog/themes/harbor/static/fonts/roboto-v20-latin-regular.woff new file mode 100644 index 0000000..69c8825 Binary files /dev/null and b/blog/themes/harbor/static/fonts/roboto-v20-latin-regular.woff differ diff --git a/blog/themes/harbor/static/fonts/roboto-v20-latin-regular.woff2 b/blog/themes/harbor/static/fonts/roboto-v20-latin-regular.woff2 new file mode 100644 index 0000000..1a53701 Binary files /dev/null and b/blog/themes/harbor/static/fonts/roboto-v20-latin-regular.woff2 differ diff --git a/blog/themes/harbor/static/images/icon.png b/blog/themes/harbor/static/images/icon.png new file mode 100644 index 0000000..9c1a810 Binary files /dev/null and b/blog/themes/harbor/static/images/icon.png differ diff --git a/blog/themes/harbor/static/js/bundle.js b/blog/themes/harbor/static/js/bundle.js new file mode 100644 index 0000000..111f41a --- 
/dev/null +++ b/blog/themes/harbor/static/js/bundle.js @@ -0,0 +1,58 @@ +!function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,function(t){return e[t]}.bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=4)}([function(e,t,n){var r,i; +/** + * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.8 + * Copyright (C) 2019 Oliver Nightingale + * @license MIT + */!function(){var o,a,s,l,c,u,d,h,f,p,m,g,y,v,x,b,w,k,S,E,N,T,R,L,I,O,Q=function(e){var t=new Q.Builder;return t.pipeline.add(Q.trimmer,Q.stopWordFilter,Q.stemmer),t.searchPipeline.add(Q.stemmer),e.call(t,t),t.build()};Q.version="2.3.8" +/*! + * lunr.utils + * Copyright (C) 2019 Oliver Nightingale + */,Q.utils={},Q.utils.warn=(o=this,function(e){o.console&&console.warn&&console.warn(e)}),Q.utils.asString=function(e){return null==e?"":e.toString()},Q.utils.clone=function(e){if(null==e)return e;for(var t=Object.create(null),n=Object.keys(e),r=0;r0){var l=Q.utils.clone(t)||{};l.position=[a,s],l.index=i.length,i.push(new Q.Token(n.slice(a,o),l))}a=o+1}}return i},Q.tokenizer.separator=/[\s\-]+/ +/*! 
+ * lunr.Pipeline + * Copyright (C) 2019 Oliver Nightingale + */,Q.Pipeline=function(){this._stack=[]},Q.Pipeline.registeredFunctions=Object.create(null),Q.Pipeline.registerFunction=function(e,t){t in this.registeredFunctions&&Q.utils.warn("Overwriting existing registered function: "+t),e.label=t,Q.Pipeline.registeredFunctions[e.label]=e},Q.Pipeline.warnIfFunctionNotRegistered=function(e){e.label&&e.label in this.registeredFunctions||Q.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n",e)},Q.Pipeline.load=function(e){var t=new Q.Pipeline;return e.forEach((function(e){var n=Q.Pipeline.registeredFunctions[e];if(!n)throw new Error("Cannot load unregistered function: "+e);t.add(n)})),t},Q.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach((function(e){Q.Pipeline.warnIfFunctionNotRegistered(e),this._stack.push(e)}),this)},Q.Pipeline.prototype.after=function(e,t){Q.Pipeline.warnIfFunctionNotRegistered(t);var n=this._stack.indexOf(e);if(-1==n)throw new Error("Cannot find existingFn");n+=1,this._stack.splice(n,0,t)},Q.Pipeline.prototype.before=function(e,t){Q.Pipeline.warnIfFunctionNotRegistered(t);var n=this._stack.indexOf(e);if(-1==n)throw new Error("Cannot find existingFn");this._stack.splice(n,0,t)},Q.Pipeline.prototype.remove=function(e){var t=this._stack.indexOf(e);-1!=t&&this._stack.splice(t,1)},Q.Pipeline.prototype.run=function(e){for(var t=this._stack.length,n=0;n1&&(oe&&(n=i),o!=e);)r=n-t,i=t+Math.floor(r/2),o=this.elements[2*i];return o==e||o>e?2*i:os?c+=2:a==s&&(t+=n[l+1]*r[c+1],l+=2,c+=2);return t},Q.Vector.prototype.similarity=function(e){return this.dot(e)/this.magnitude()||0},Q.Vector.prototype.toArray=function(){for(var e=new Array(this.elements.length/2),t=1,n=0;t0){var o,a=i.str.charAt(0);a in i.node.edges?o=i.node.edges[a]:(o=new 
Q.TokenSet,i.node.edges[a]=o),1==i.str.length&&(o.final=!0),r.push({node:o,editsRemaining:i.editsRemaining,str:i.str.slice(1)})}if(0!=i.editsRemaining){if("*"in i.node.edges)var s=i.node.edges["*"];else{s=new Q.TokenSet;i.node.edges["*"]=s}if(0==i.str.length&&(s.final=!0),r.push({node:s,editsRemaining:i.editsRemaining-1,str:i.str}),i.str.length>1&&r.push({node:i.node,editsRemaining:i.editsRemaining-1,str:i.str.slice(1)}),1==i.str.length&&(i.node.final=!0),i.str.length>=1){if("*"in i.node.edges)var l=i.node.edges["*"];else{l=new Q.TokenSet;i.node.edges["*"]=l}1==i.str.length&&(l.final=!0),r.push({node:l,editsRemaining:i.editsRemaining-1,str:i.str.slice(1)})}if(i.str.length>1){var c,u=i.str.charAt(0),d=i.str.charAt(1);d in i.node.edges?c=i.node.edges[d]:(c=new Q.TokenSet,i.node.edges[d]=c),1==i.str.length&&(c.final=!0),r.push({node:c,editsRemaining:i.editsRemaining-1,str:u+i.str.slice(2)})}}}return n},Q.TokenSet.fromString=function(e){for(var t=new Q.TokenSet,n=t,r=0,i=e.length;r=e;t--){var n=this.uncheckedNodes[t],r=n.child.toString();r in this.minimizedNodes?n.parent.edges[n.char]=this.minimizedNodes[r]:(n.child._str=r,this.minimizedNodes[r]=n.child),this.uncheckedNodes.pop()}} +/*! 
+ * lunr.Index + * Copyright (C) 2019 Oliver Nightingale + */,Q.Index=function(e){this.invertedIndex=e.invertedIndex,this.fieldVectors=e.fieldVectors,this.tokenSet=e.tokenSet,this.fields=e.fields,this.pipeline=e.pipeline},Q.Index.prototype.search=function(e){return this.query((function(t){new Q.QueryParser(e,t).parse()}))},Q.Index.prototype.query=function(e){for(var t=new Q.Query(this.fields),n=Object.create(null),r=Object.create(null),i=Object.create(null),o=Object.create(null),a=Object.create(null),s=0;s1?1:e},Q.Builder.prototype.k1=function(e){this._k1=e},Q.Builder.prototype.add=function(e,t){var n=e[this._ref],r=Object.keys(this._fields);this._documents[n]=t||{},this.documentCount+=1;for(var i=0;i=this.length)return Q.QueryLexer.EOS;var e=this.str.charAt(this.pos);return this.pos+=1,e},Q.QueryLexer.prototype.width=function(){return this.pos-this.start},Q.QueryLexer.prototype.ignore=function(){this.start==this.pos&&(this.pos+=1),this.start=this.pos},Q.QueryLexer.prototype.backup=function(){this.pos-=1},Q.QueryLexer.prototype.acceptDigitRun=function(){var e,t;do{t=(e=this.next()).charCodeAt(0)}while(t>47&&t<58);e!=Q.QueryLexer.EOS&&this.backup()},Q.QueryLexer.prototype.more=function(){return this.pos1&&(e.backup(),e.emit(Q.QueryLexer.TERM)),e.ignore(),e.more())return Q.QueryLexer.lexText},Q.QueryLexer.lexEditDistance=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(Q.QueryLexer.EDIT_DISTANCE),Q.QueryLexer.lexText},Q.QueryLexer.lexBoost=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(Q.QueryLexer.BOOST),Q.QueryLexer.lexText},Q.QueryLexer.lexEOS=function(e){e.width()>0&&e.emit(Q.QueryLexer.TERM)},Q.QueryLexer.termSeparator=Q.tokenizer.separator,Q.QueryLexer.lexText=function(e){for(;;){var t=e.next();if(t==Q.QueryLexer.EOS)return Q.QueryLexer.lexEOS;if(92!=t.charCodeAt(0)){if(":"==t)return Q.QueryLexer.lexField;if("~"==t)return e.backup(),e.width()>0&&e.emit(Q.QueryLexer.TERM),Q.QueryLexer.lexEditDistance;if("^"==t)return 
e.backup(),e.width()>0&&e.emit(Q.QueryLexer.TERM),Q.QueryLexer.lexBoost;if("+"==t&&1===e.width())return e.emit(Q.QueryLexer.PRESENCE),Q.QueryLexer.lexText;if("-"==t&&1===e.width())return e.emit(Q.QueryLexer.PRESENCE),Q.QueryLexer.lexText;if(t.match(Q.QueryLexer.termSeparator))return Q.QueryLexer.lexTerm}else e.escapeCharacter()}},Q.QueryParser=function(e,t){this.lexer=new Q.QueryLexer(e),this.query=t,this.currentClause={},this.lexemeIdx=0},Q.QueryParser.prototype.parse=function(){this.lexer.run(),this.lexemes=this.lexer.lexemes;for(var e=Q.QueryParser.parseClause;e;)e=e(this);return this.query},Q.QueryParser.prototype.peekLexeme=function(){return this.lexemes[this.lexemeIdx]},Q.QueryParser.prototype.consumeLexeme=function(){var e=this.peekLexeme();return this.lexemeIdx+=1,e},Q.QueryParser.prototype.nextClause=function(){var e=this.currentClause;this.query.clause(e),this.currentClause={}},Q.QueryParser.parseClause=function(e){var t=e.peekLexeme();if(null!=t)switch(t.type){case Q.QueryLexer.PRESENCE:return Q.QueryParser.parsePresence;case Q.QueryLexer.FIELD:return Q.QueryParser.parseField;case Q.QueryLexer.TERM:return Q.QueryParser.parseTerm;default:var n="expected either a field or a term, found "+t.type;throw t.str.length>=1&&(n+=" with value '"+t.str+"'"),new Q.QueryParseError(n,t.start,t.end)}},Q.QueryParser.parsePresence=function(e){var t=e.consumeLexeme();if(null!=t){switch(t.str){case"-":e.currentClause.presence=Q.Query.presence.PROHIBITED;break;case"+":e.currentClause.presence=Q.Query.presence.REQUIRED;break;default:var n="unrecognised presence operator'"+t.str+"'";throw new Q.QueryParseError(n,t.start,t.end)}var r=e.peekLexeme();if(null==r){n="expecting term or field, found nothing";throw new Q.QueryParseError(n,t.start,t.end)}switch(r.type){case Q.QueryLexer.FIELD:return Q.QueryParser.parseField;case Q.QueryLexer.TERM:return Q.QueryParser.parseTerm;default:n="expecting term or field, found '"+r.type+"'";throw new 
Q.QueryParseError(n,r.start,r.end)}}},Q.QueryParser.parseField=function(e){var t=e.consumeLexeme();if(null!=t){if(-1==e.query.allFields.indexOf(t.str)){var n=e.query.allFields.map((function(e){return"'"+e+"'"})).join(", "),r="unrecognised field '"+t.str+"', possible fields: "+n;throw new Q.QueryParseError(r,t.start,t.end)}e.currentClause.fields=[t.str];var i=e.peekLexeme();if(null==i){r="expecting term, found nothing";throw new Q.QueryParseError(r,t.start,t.end)}switch(i.type){case Q.QueryLexer.TERM:return Q.QueryParser.parseTerm;default:r="expecting term, found '"+i.type+"'";throw new Q.QueryParseError(r,i.start,i.end)}}},Q.QueryParser.parseTerm=function(e){var t=e.consumeLexeme();if(null!=t){e.currentClause.term=t.str.toLowerCase(),-1!=t.str.indexOf("*")&&(e.currentClause.usePipeline=!1);var n=e.peekLexeme();if(null!=n)switch(n.type){case Q.QueryLexer.TERM:return e.nextClause(),Q.QueryParser.parseTerm;case Q.QueryLexer.FIELD:return e.nextClause(),Q.QueryParser.parseField;case Q.QueryLexer.EDIT_DISTANCE:return Q.QueryParser.parseEditDistance;case Q.QueryLexer.BOOST:return Q.QueryParser.parseBoost;case Q.QueryLexer.PRESENCE:return e.nextClause(),Q.QueryParser.parsePresence;default:var r="Unexpected lexeme type '"+n.type+"'";throw new Q.QueryParseError(r,n.start,n.end)}else e.nextClause()}},Q.QueryParser.parseEditDistance=function(e){var t=e.consumeLexeme();if(null!=t){var n=parseInt(t.str,10);if(isNaN(n)){var r="edit distance must be numeric";throw new Q.QueryParseError(r,t.start,t.end)}e.currentClause.editDistance=n;var i=e.peekLexeme();if(null!=i)switch(i.type){case Q.QueryLexer.TERM:return e.nextClause(),Q.QueryParser.parseTerm;case Q.QueryLexer.FIELD:return e.nextClause(),Q.QueryParser.parseField;case Q.QueryLexer.EDIT_DISTANCE:return Q.QueryParser.parseEditDistance;case Q.QueryLexer.BOOST:return Q.QueryParser.parseBoost;case Q.QueryLexer.PRESENCE:return e.nextClause(),Q.QueryParser.parsePresence;default:r="Unexpected lexeme type '"+i.type+"'";throw new 
Q.QueryParseError(r,i.start,i.end)}else e.nextClause()}},Q.QueryParser.parseBoost=function(e){var t=e.consumeLexeme();if(null!=t){var n=parseInt(t.str,10);if(isNaN(n)){var r="boost must be numeric";throw new Q.QueryParseError(r,t.start,t.end)}e.currentClause.boost=n;var i=e.peekLexeme();if(null!=i)switch(i.type){case Q.QueryLexer.TERM:return e.nextClause(),Q.QueryParser.parseTerm;case Q.QueryLexer.FIELD:return e.nextClause(),Q.QueryParser.parseField;case Q.QueryLexer.EDIT_DISTANCE:return Q.QueryParser.parseEditDistance;case Q.QueryLexer.BOOST:return Q.QueryParser.parseBoost;case Q.QueryLexer.PRESENCE:return e.nextClause(),Q.QueryParser.parsePresence;default:r="Unexpected lexeme type '"+i.type+"'";throw new Q.QueryParseError(r,i.start,i.end)}else e.nextClause()}},void 0===(i="function"==typeof(r=function(){return Q})?r.call(t,n,t,e):r)||(e.exports=i)}()},function(e,t,n){"use strict";var r,i=function(){return void 0===r&&(r=Boolean(window&&document&&document.all&&!window.atob)),r},o=function(){var e={};return function(t){if(void 0===e[t]){var n=document.querySelector(t);if(window.HTMLIFrameElement&&n instanceof window.HTMLIFrameElement)try{n=n.contentDocument.head}catch(e){n=null}e[t]=n}return e[t]}}(),a=[];function s(e){for(var t=-1,n=0;n1&&void 0!==arguments[1])||arguments[1],i=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[],o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:5e3;t(this,e),this.ctx=n,this.iframes=r,this.exclude=i,this.iframesTimeout=o}return n(e,[{key:"getContexts",value:function(){var e=[];return(void 0!==this.ctx&&this.ctx?NodeList.prototype.isPrototypeOf(this.ctx)?Array.prototype.slice.call(this.ctx):Array.isArray(this.ctx)?this.ctx:"string"==typeof this.ctx?Array.prototype.slice.call(document.querySelectorAll(this.ctx)):[this.ctx]:[]).forEach((function(t){var n=e.filter((function(e){return e.contains(t)})).length>0;-1!==e.indexOf(t)||n||e.push(t)})),e}},{key:"getIframeContents",value:function(e,t){var n=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:function(){},r=void 0;try{var i=e.contentWindow;if(r=i.document,!i||!r)throw new Error("iframe inaccessible")}catch(e){n()}r&&t(r)}},{key:"isIframeBlank",value:function(e){var t="about:blank",n=e.getAttribute("src").trim();return e.contentWindow.location.href===t&&n!==t&&n}},{key:"observeIframeLoad",value:function(e,t,n){var r=this,i=!1,o=null,a=function a(){if(!i){i=!0,clearTimeout(o);try{r.isIframeBlank(e)||(e.removeEventListener("load",a),r.getIframeContents(e,t,n))}catch(e){n()}}};e.addEventListener("load",a),o=setTimeout(a,this.iframesTimeout)}},{key:"onIframeReady",value:function(e,t,n){try{"complete"===e.contentWindow.document.readyState?this.isIframeBlank(e)?this.observeIframeLoad(e,t,n):this.getIframeContents(e,t,n):this.observeIframeLoad(e,t,n)}catch(e){n()}}},{key:"waitForIframes",value:function(e,t){var n=this,r=0;this.forEachIframe(e,(function(){return!0}),(function(e){r++,n.waitForIframes(e.querySelector("html"),(function(){--r||t()}))}),(function(e){e||t()}))}},{key:"forEachIframe",value:function(t,n,r){var i=this,o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:function(){},a=t.querySelectorAll("iframe"),s=a.length,l=0;a=Array.prototype.slice.call(a);var c=function(){--s<=0&&o(l)};s||c(),a.forEach((function(t){e.matches(t,i.exclude)?c():i.onIframeReady(t,(function(e){n(t)&&(l++,r(e)),c()}),c)}))}},{key:"createIterator",value:function(e,t,n){return document.createNodeIterator(e,t,n,!1)}},{key:"createInstanceOnIframe",value:function(t){return new e(t.querySelector("html"),this.iframes)}},{key:"compareNodeIframe",value:function(e,t,n){if(e.compareDocumentPosition(n)&Node.DOCUMENT_POSITION_PRECEDING){if(null===t)return!0;if(t.compareDocumentPosition(n)&Node.DOCUMENT_POSITION_FOLLOWING)return!0}return!1}},{key:"getIteratorNode",value:function(e){var t=e.previousNode();return{prevNode:t,node:(null===t||e.nextNode())&&e.nextNode()}}},{key:"checkIframeFilter",value:function(e,t,n,r){var i=!1,o=!1;return 
r.forEach((function(e,t){e.val===n&&(i=t,o=e.handled)})),this.compareNodeIframe(e,t,n)?(!1!==i||o?!1===i||o||(r[i].handled=!0):r.push({val:n,handled:!0}),!0):(!1===i&&r.push({val:n,handled:!1}),!1)}},{key:"handleOpenIframes",value:function(e,t,n,r){var i=this;e.forEach((function(e){e.handled||i.getIframeContents(e.val,(function(e){i.createInstanceOnIframe(e).forEachNode(t,n,r)}))}))}},{key:"iterateThroughNodes",value:function(e,t,n,r,i){for(var o,a=this,s=this.createIterator(t,e,r),l=[],c=[],u=void 0,d=void 0;o=void 0,o=a.getIteratorNode(s),d=o.prevNode,u=o.node;)this.iframes&&this.forEachIframe(t,(function(e){return a.checkIframeFilter(u,d,e,l)}),(function(t){a.createInstanceOnIframe(t).forEachNode(e,(function(e){return c.push(e)}),r)})),c.push(u);c.forEach((function(e){n(e)})),this.iframes&&this.handleOpenIframes(l,e,n,r),i()}},{key:"forEachNode",value:function(e,t,n){var r=this,i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:function(){},o=this.getContexts(),a=o.length;a||i(),o.forEach((function(o){var s=function(){r.iterateThroughNodes(e,o,t,n,(function(){--a<=0&&i()}))};r.iframes?r.waitForIframes(o,s):s()}))}}],[{key:"matches",value:function(e,t){var n="string"==typeof t?[t]:t,r=e.matches||e.matchesSelector||e.msMatchesSelector||e.mozMatchesSelector||e.oMatchesSelector||e.webkitMatchesSelector;if(r){var i=!1;return n.every((function(t){return!r.call(e,t)||(i=!0,!1)})),i}return!1}}]),e}(),o=function(){function o(e){t(this,o),this.ctx=e,this.ie=!1;var n=window.navigator.userAgent;(n.indexOf("MSIE")>-1||n.indexOf("Trident")>-1)&&(this.ie=!0)}return n(o,[{key:"log",value:function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"debug",r=this.opt.log;this.opt.debug&&"object"===(void 0===r?"undefined":e(r))&&"function"==typeof r[n]&&r[n]("mark.js: "+t)}},{key:"escapeStr",value:function(e){return 
e.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g,"\\$&")}},{key:"createRegExp",value:function(e){return"disabled"!==this.opt.wildcards&&(e=this.setupWildcardsRegExp(e)),e=this.escapeStr(e),Object.keys(this.opt.synonyms).length&&(e=this.createSynonymsRegExp(e)),(this.opt.ignoreJoiners||this.opt.ignorePunctuation.length)&&(e=this.setupIgnoreJoinersRegExp(e)),this.opt.diacritics&&(e=this.createDiacriticsRegExp(e)),e=this.createMergedBlanksRegExp(e),(this.opt.ignoreJoiners||this.opt.ignorePunctuation.length)&&(e=this.createJoinersRegExp(e)),"disabled"!==this.opt.wildcards&&(e=this.createWildcardsRegExp(e)),e=this.createAccuracyRegExp(e)}},{key:"createSynonymsRegExp",value:function(e){var t=this.opt.synonyms,n=this.opt.caseSensitive?"":"i",r=this.opt.ignoreJoiners||this.opt.ignorePunctuation.length?"\0":"";for(var i in t)if(t.hasOwnProperty(i)){var o=t[i],a="disabled"!==this.opt.wildcards?this.setupWildcardsRegExp(i):this.escapeStr(i),s="disabled"!==this.opt.wildcards?this.setupWildcardsRegExp(o):this.escapeStr(o);""!==a&&""!==s&&(e=e.replace(new RegExp("("+this.escapeStr(a)+"|"+this.escapeStr(s)+")","gm"+n),r+"("+this.processSynomyms(a)+"|"+this.processSynomyms(s)+")"+r))}return e}},{key:"processSynomyms",value:function(e){return(this.opt.ignoreJoiners||this.opt.ignorePunctuation.length)&&(e=this.setupIgnoreJoinersRegExp(e)),e}},{key:"setupWildcardsRegExp",value:function(e){return(e=e.replace(/(?:\\)*\?/g,(function(e){return"\\"===e.charAt(0)?"?":""}))).replace(/(?:\\)*\*/g,(function(e){return"\\"===e.charAt(0)?"*":""}))}},{key:"createWildcardsRegExp",value:function(e){var t="withSpaces"===this.opt.wildcards;return e.replace(/\u0001/g,t?"[\\S\\s]?":"\\S?").replace(/\u0002/g,t?"[\\S\\s]*?":"\\S*")}},{key:"setupIgnoreJoinersRegExp",value:function(e){return e.replace(/[^(|)\\]/g,(function(e,t,n){var r=n.charAt(t+1);return/[(|)\\]/.test(r)||""===r?e:e+"\0"}))}},{key:"createJoinersRegExp",value:function(e){var t=[],n=this.opt.ignorePunctuation;return 
Array.isArray(n)&&n.length&&t.push(this.escapeStr(n.join(""))),this.opt.ignoreJoiners&&t.push("\\u00ad\\u200b\\u200c\\u200d"),t.length?e.split(/\u0000+/).join("["+t.join("")+"]*"):e}},{key:"createDiacriticsRegExp",value:function(e){var t=this.opt.caseSensitive?"":"i",n=this.opt.caseSensitive?["aàáảãạăằắẳẵặâầấẩẫậäåāą","AÀÁẢÃẠĂẰẮẲẴẶÂẦẤẨẪẬÄÅĀĄ","cçćč","CÇĆČ","dđď","DĐĎ","eèéẻẽẹêềếểễệëěēę","EÈÉẺẼẸÊỀẾỂỄỆËĚĒĘ","iìíỉĩịîïī","IÌÍỈĨỊÎÏĪ","lł","LŁ","nñňń","NÑŇŃ","oòóỏõọôồốổỗộơởỡớờợöøō","OÒÓỎÕỌÔỒỐỔỖỘƠỞỠỚỜỢÖØŌ","rř","RŘ","sšśșş","SŠŚȘŞ","tťțţ","TŤȚŢ","uùúủũụưừứửữựûüůū","UÙÚỦŨỤƯỪỨỬỮỰÛÜŮŪ","yýỳỷỹỵÿ","YÝỲỶỸỴŸ","zžżź","ZŽŻŹ"]:["aàáảãạăằắẳẵặâầấẩẫậäåāąAÀÁẢÃẠĂẰẮẲẴẶÂẦẤẨẪẬÄÅĀĄ","cçćčCÇĆČ","dđďDĐĎ","eèéẻẽẹêềếểễệëěēęEÈÉẺẼẸÊỀẾỂỄỆËĚĒĘ","iìíỉĩịîïīIÌÍỈĨỊÎÏĪ","lłLŁ","nñňńNÑŇŃ","oòóỏõọôồốổỗộơởỡớờợöøōOÒÓỎÕỌÔỒỐỔỖỘƠỞỠỚỜỢÖØŌ","rřRŘ","sšśșşSŠŚȘŞ","tťțţTŤȚŢ","uùúủũụưừứửữựûüůūUÙÚỦŨỤƯỪỨỬỮỰÛÜŮŪ","yýỳỷỹỵÿYÝỲỶỸỴŸ","zžżźZŽŻŹ"],r=[];return e.split("").forEach((function(i){n.every((function(n){if(-1!==n.indexOf(i)){if(r.indexOf(n)>-1)return!1;e=e.replace(new RegExp("["+n+"]","gm"+t),"["+n+"]"),r.push(n)}return!0}))})),e}},{key:"createMergedBlanksRegExp",value:function(e){return e.replace(/[\s]+/gim,"[\\s]+")}},{key:"createAccuracyRegExp",value:function(e){var t=this,n=this.opt.accuracy,r="string"==typeof n?n:n.value,i="string"==typeof n?[]:n.limiters,o="";switch(i.forEach((function(e){o+="|"+t.escapeStr(e)})),r){case"partially":default:return"()("+e+")";case"complementary":return"()([^"+(o="\\s"+(o||this.escapeStr("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~¡¿")))+"]*"+e+"[^"+o+"]*)";case"exactly":return"(^|\\s"+o+")("+e+")(?=$|\\s"+o+")"}}},{key:"getSeparatedKeywords",value:function(e){var t=this,n=[];return e.forEach((function(e){t.opt.separateWordSearch?e.split(" ").forEach((function(e){e.trim()&&-1===n.indexOf(e)&&n.push(e)})):e.trim()&&-1===n.indexOf(e)&&n.push(e)})),{keywords:n.sort((function(e,t){return t.length-e.length})),length:n.length}}},{key:"isNumeric",value:function(e){return 
Number(parseFloat(e))==e}},{key:"checkRanges",value:function(e){var t=this;if(!Array.isArray(e)||"[object Object]"!==Object.prototype.toString.call(e[0]))return this.log("markRanges() will only accept an array of objects"),this.opt.noMatch(e),[];var n=[],r=0;return e.sort((function(e,t){return e.start-t.start})).forEach((function(e){var i=t.callNoMatchOnInvalidRanges(e,r),o=i.start,a=i.end;i.valid&&(e.start=o,e.length=a-o,n.push(e),r=a)})),n}},{key:"callNoMatchOnInvalidRanges",value:function(e,t){var n=void 0,r=void 0,i=!1;return e&&void 0!==e.start?(r=(n=parseInt(e.start,10))+parseInt(e.length,10),this.isNumeric(e.start)&&this.isNumeric(e.length)&&r-t>0&&r-n>0?i=!0:(this.log("Ignoring invalid or overlapping range: "+JSON.stringify(e)),this.opt.noMatch(e))):(this.log("Ignoring invalid range: "+JSON.stringify(e)),this.opt.noMatch(e)),{start:n,end:r,valid:i}}},{key:"checkWhitespaceRanges",value:function(e,t,n){var r=void 0,i=!0,o=n.length,a=t-o,s=parseInt(e.start,10)-a;return(r=(s=s>o?o:s)+parseInt(e.length,10))>o&&(r=o,this.log("End range automatically set to the max value of "+o)),s<0||r-s<0||s>o||r>o?(i=!1,this.log("Invalid range: "+JSON.stringify(e)),this.opt.noMatch(e)):""===n.substring(s,r).replace(/\s+/g,"")&&(i=!1,this.log("Skipping whitespace only range: "+JSON.stringify(e)),this.opt.noMatch(e)),{start:s,end:r,valid:i}}},{key:"getTextNodes",value:function(e){var t=this,n="",r=[];this.iterator.forEachNode(NodeFilter.SHOW_TEXT,(function(e){r.push({start:n.length,end:(n+=e.textContent).length,node:e})}),(function(e){return t.matchesExclude(e.parentNode)?NodeFilter.FILTER_REJECT:NodeFilter.FILTER_ACCEPT}),(function(){e({value:n,nodes:r})}))}},{key:"matchesExclude",value:function(e){return i.matches(e,this.opt.exclude.concat(["script","style","title","head","html"]))}},{key:"wrapRangeInTextNode",value:function(e,t,n){var r=this.opt.element?this.opt.element:"mark",i=e.splitText(t),o=i.splitText(n-t),a=document.createElement(r);return 
a.setAttribute("data-markjs","true"),this.opt.className&&a.setAttribute("class",this.opt.className),a.textContent=i.textContent,i.parentNode.replaceChild(a,i),o}},{key:"wrapRangeInMappedTextNode",value:function(e,t,n,r,i){var o=this;e.nodes.every((function(a,s){var l=e.nodes[s+1];if(void 0===l||l.start>t){if(!r(a.node))return!1;var c=t-a.start,u=(n>a.end?a.end:n)-a.start,d=e.value.substr(0,a.start),h=e.value.substr(u+a.start);if(a.node=o.wrapRangeInTextNode(a.node,c,u),e.value=d+h,e.nodes.forEach((function(t,n){n>=s&&(e.nodes[n].start>0&&n!==s&&(e.nodes[n].start-=u),e.nodes[n].end-=u)})),n-=u,i(a.node.previousSibling,a.start),!(n>a.end))return!1;t=a.end}return!0}))}},{key:"wrapMatches",value:function(e,t,n,r,i){var o=this,a=0===t?0:t+1;this.getTextNodes((function(t){t.nodes.forEach((function(t){t=t.node;for(var i=void 0;null!==(i=e.exec(t.textContent))&&""!==i[a];)if(n(i[a],t)){var s=i.index;if(0!==a)for(var l=1;l{if(null==e||null==e)return[];let n=e.toString().trim().toLowerCase(),i=[];for(let e=0;e<=n.length-2;e++){let o=r.utils.clone(t)||{};o.position=[e,e+2],o.index=i.length,i.push(new r.Token(n.slice(e,e+2),o))}return i},d=e=>{const t=document.querySelector("#searchResults"),n=document.querySelector("#searchBox").value;for(;t.firstChild;)t.removeChild(t.firstChild);if(!e.length){let e=document.createElement("div");return e.className="searchResultPage",e.innerHTML='No results found for query "'+n+'"',void t.append(e)}let r=new a.a(document.querySelector("#searchResults"));e.slice(0,10).forEach((e,i)=>{let o=document.createElement("div");o.className="searchResultPage";let a=l[i].matchData.metadata,s=a[Object.keys(a)[0]].body.position[0][0],c=s-50>0?s-50:0,u=document.createElement("a");u.className="searchResultTitle",u.href=e.ref,u.innerHTML=e.title,o.append(u);let d=document.createElement("div");d.className="searchResultBody",d.innerHTML=e.body.substr(c,100),o.append(d),t.append(o),r.mark(n)})};(()=>{let e=new 
XMLHttpRequest;e.open("GET","../post/index.json",!0),e.onload=function(){this.status>=200&&this.status<400?(c=JSON.parse(this.response),s=i()((function(){this.tokenizer=u,this.pipeline.reset(),this.ref("ref"),this.field("title",{boost:10}),this.field("body"),this.metadataWhitelist=["position"],c.forEach(e=>{this.add(e)},this)}))):console.error("Error getting Hugo index flie")},e.onerror=function(){console.error("connection error")},e.send()})(),(()=>{const e=document.querySelector("#searchBox");null!==e&&e.addEventListener("keyup",(function(e){let t=document.querySelector("#searchResults"),n=e.currentTarget.value;n.length<2?t.style.display="none":(d((e=>(l=s.search((e=>{const t=e.toString().trim().toLowerCase(),n=[];for(let e=0;e<=t.length-2;e++)n.push(t.slice(e,e+2));return n.join(" ")})(e)),l.map(e=>c.filter(t=>t.ref===e.ref)[0])))(n)),t.style.display="block")}))})()},function(e,t,n){var r=n(1),i=n(7);"string"==typeof(i=i.__esModule?i.default:i)&&(i=[[e.i,i,""]]);var o={insert:"head",singleton:!1},a=(r(i,o),i.locals?i.locals:{});e.exports=a},function(e,t,n){(t=n(2)(!1)).push([e.i,"/* noto-sans-jp-regular - japanese_latin */\n@font-face {\n font-family: 'Noto Sans JP';\n font-style: normal;\n font-weight: 400;\n font-display: swap;\n src: local('Noto Sans Japanese Regular'), local('NotoSansJapanese-Regular'),\n url('../fonts/noto-sans-jp-v25-japanese_latin-regular.woff2') format('woff2'), /* Super Modern Browsers */\n url('../fonts/noto-sans-jp-v25-japanese_latin-regular.woff') format('woff'); /* Modern Browsers */\n}\n\n/* roboto-regular - latin */\n@font-face {\n font-family: 'Roboto';\n font-style: normal;\n font-weight: 400;\n font-display: swap;\n src: local('Roboto'), local('Roboto-Regular'),\n url('../fonts/roboto-v20-latin-regular.woff2') format('woff2'), /* Super Modern Browsers */\n url('../fonts/roboto-v20-latin-regular.woff') format('woff'); /* Modern Browsers */\n}\n\nhtml {\n background-color: #FFFFFF;\n}\n\nbody {\n margin: 0;\n padding: 0;\n 
font-family: 'Noto Sans JP', sans-serif;\n font-size: 16px;\n color: #42464c;\n background: 0 0;\n flex-flow: column;\n text-rendering: optimizeLegibility;\n}\n\na {\n outline: none;\n text-decoration: none;\n}\n\n.error-text {\n font-family: 'Roboto', Helvetica, sans-serif;\n text-align: center;\n}\n\n.header {\n margin: auto;\n position: relative;\n}\n\n.navbar {\n min-height: 50px;\n margin-bottom: 20px;\n}\n\n.nav {\n top: 0;\n position: relative;\n max-width: 800px;\n margin: 20px auto;\n padding: 0 10px;\n text-align: right;\n}\n\n.nav-logo {\n float: left;\n transition: transform 300ms ease-out;\n}\n\n.nav-logo:hover {\n transform: scale(1.1);\n}\n\n.nav-logo img {\n display: block;\n width: auto;\n}\n\n.nav-links {\n margin: 0;\n padding: 0;\n font-size: 14px;\n list-style: none;\n}\n\n.nav-links li {\n display: inline-block;\n margin: 0 0 0 10px;\n}\n\n.nav-links li a em {\n color: #000000;\n}\n\n.intro-header {\n margin: 40px 0 20px;\n position: relative;\n}\n\n.intro-header [class$=\"-heading\"] {\n text-align: center;\n}\n\n.intro-header [class$=\"-heading\"] h1 {\n margin-top: 0;\n padding-top: 0;\n font-size: 50px;\n}\n\nh1,h2,h3,h4,h5,h6 {\n font-family: 'Roboto', Helvetica, sans-serif;\n font-weight: 800;\n color: #111111;\n}\n\n.container[role=main] {\n max-width: 700px;\n padding: 0 15px;\n font-size: 16px;\n line-height: 1.7;\n color: #333333;\n}\n\n.container img {\n width: 100%;\n}\n\n#blog-archives {\n margin: 20px auto;\n font-size: 14px;\n}\n\n.archives {\n margin: 20px auto;\n}\n\n.archives td {\n border: none;\n text-align: left;\n}\n\n.article {\n text-align: justify;\n}\n\n#TableOfContents {\n font-size: 14px;\n border: 2px dotted #cccccc;\n margin: 1em 0;\n padding: 0.5em 0;\n background-color: #f0f0f0;\n}\n\n#TableOfContents ul {\n list-style-type: none;\n}\n\n#TableOfContents ul ul {\n list-style-type: disc;\n}\n\np {\n line-height: 1.5;\n margin: 0.5em 0;\n}\n\np + p {\n margin-top: 1em;\n}\n\n.social-icon {\n margin-left: 0.2em;\n 
margin-right: 0.2em;\n}\n\n.post-preview {\n padding-bottom: 10px;\n border-bottom: 1px solid #eeeeee;\n}\n\n.post-preview a {\n text-decoration: none;\n color: #222222;\n}\n\n.post-preview:last-child {\n border-bottom: 0;\n}\n\n.postmeta {\n margin: 10px 0;\n}\n\n.blog-tags {\n font-family: 'Roboto', Helvetica, sans-serif;\n color: #999999;\n font-size: 15px;\n margin: 30px 0;\n}\n\n.blog-tags a {\n color: #0000BB;\n text-decoration: none;\n padding: 0px 5px;\n}\n\n.blog-tags a:before {\n content: \"#\";\n}\n\nh4.term-name > span.badge {\n float: right;\n}\n\ndiv.panel-body {\n font-family: 'Roboto', Helvetica, sans-serif;\n font-weight: 800;\n border-radius: 0;\n border: none;\n font-size: 16px;\n}\n\n.post-entry {\n width: 100%;\n margin-top: 10px;\n}\n\n.post-read-more {\n font-family: 'Roboto', Helvetica, sans-serif;\n font-weight: 800;\n float: right;\n position: relative;\n display: block;\n text-decoration: none;\n}\n\na.post-read-more::after {\n position: absolute;\n bottom: -4px;\n left: 0;\n content: '';\n width: 100%;\n height: 2px;\n background: #333;\n transform: scale(0, 1);\n transform-origin: center top;\n transition: transform .3s;\n}\n\na.post-read-more:hover::after {\n transform: scale(1, 1);\n}\n\nblockquote {\n color: #808080;\n padding: 0 10px;\n border-left: 4px solid #aaaaaa;\n}\n\nblockquote p:first-child {\n margin-top: 0;\n}\n\ntable {\n padding: 0;\n border-spacing: 0;\n}\n\ntable tr {\n border-top: 1px solid #dddddd;\n margin: 0;\n padding: 0;\n}\n\ntable tr th {\n font-weight: bold;\n border: 1px solid #dddddd;\n text-align: left;\n margin: 0;\n padding: 6px 13px;\n}\n\ntable tr td {\n border: 1px solid #dddddd;\n text-align: left;\n margin: 0;\n padding: 6px 12px;\n}\n\ntable tr th :first-child,\ntable tr td :first-child {\n margin-top: 0;\n}\n\ntable tr th :last-child,\ntable tr td :last-child {\n margin-bottom: 0;\n}\n\n.chroma .ln {\n margin-right: 0.8em;\n padding: 0 0.4em 0 0.4em;\n}\n\npre {\n display: block;\n padding: 
9.5px;\n margin: 0 0 10px;\n font-size: 13px;\n line-height: 1.42857143;\n color: #333;\n word-break: break-all;\n word-wrap: break-word;\n background-color: #f5f5f5;\n border: 1px solid #cccccc;\n border-radius: 4px;\n}\n\npre code {\n padding: 0;\n font-family: Menlo, Monaco, Consolas, monospace;\n font-size: inherit;\n color: inherit;\n white-space: pre-wrap;\n background-color: transparent;\n border-radius: 0;\n}\n\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #dd0011;\n background-color: #f9f9f9;\n border-radius: 4px;\n}\n\n#backtotopButton {\n position: fixed;\n bottom: 20px;\n right: 20px;\n z-index: 99;\n border: none;\n outline: none;\n background-color: #eeeeff;\n cursor: pointer;\n padding: 15px;\n border-radius: 10px;\n font-size: 16px;\n text-align: center;\n}\n\n#backtotopButton:hover {\n background-color: #aaaaaa;\n}\n\n.searchBoxContainer {\n position: relative;\n width: 300px;\n height: 30px;\n margin: 10px auto 50px auto;\n}\n\ninput.searchBox {\n position: absolute;\n width: 100%;\n padding: 0 35px 0 15px;\n top: 0;\n left: 0;\n right: 0;\n bottom: 0;\n border-radius: 15px;\n outline: 0;\n font-size: 16px;\n color: #707070;\n background-color:#f6f6f6;\n border: solid 1px #c9c9c9;\n box-sizing: border-box;\n}\n\n.searchBox::placeholder {\n color: #c9c9c9;\n}\n\n.searchResults {\n display: none;\n max-width: 600px;\n min-width: 300px;\n margin: 0 auto;\n top: 210px;\n left: 0;\n right: 0;\n padding: 5px;\n border-radius: 5px;\n text-align: left;\n}\n\n.searchResultPage {\n padding: 14px\n}\n\n.searchResultTitle {\n font-family: 'Roboto', Helvetica, sans-serif;\n font-weight: bold;\n font-size: 24px;\n margin: 5px 0;\n}\n\n.searchResultBody {\n font-size: 16px;\n}\n\nmark {\n background-color: #eeff00;\n}\n\n.pager {\n list-style: none;\n text-align: center;\n margin:20px 0 0;\n padding-left: 0;\n}\n\n.pager ul {\n display: block;\n}\n\n.pager li {\n display: inline;\n}\n\n.pager li a {\n box-sizing: border-box;\n font-family: 'Roboto', 
Helvetica, sans-serif;\n text-transform: uppercase;\n text-align: center;\n font-size: 14px;\n font-weight: 800;\n letter-spacing: 1px;\n padding: 10px 5px;\n background: #ffffff;\n border-radius: 0;\n border: 1px solid #dddddd;\n display: inline-block;\n color: #404040;\n text-decoration: none;\n}\n\n.pager a:hover:not(.active) {\n background-color: #dddddd;\n}\n\n.pager .previous > a {\n float: left;\n display: block;\n}\n\n.pager .next > a {\n float: right;\n display: block;\n}\n\nfooter {\n padding: 60px 0;\n text-align: center;\n margin-top: auto;\n font-size: 14px;\n font-family: 'Roboto', Helvetica, sans-serif;\n}\n\nfooter .copyright {\n font-family: 'Roboto', Helvetica, sans-serif;\n text-align: center;\n margin-bottom: 0;\n}\n\nfooter .theme-by {\n text-align: center;\n margin: 10px 0 0;\n}\n\nfooter a {\n color: #050505;\n font-weight: bold;\n}\n\nfooter em {\n cursor: pointer;\n}\n\n@media (min-width: 600px) {\n .header {\n margin: auto;\n }\n\n .nav-links {\n font-size: 18px;\n }\n\n .nav-links li {\n margin: 0 0 0 30px;\n }\n\n .container[role=main] {\n font-size: 16px;\n line-height: 1.8;\n margin: 40px auto;\n }\n\n .blog-tags {\n margin: 20px 0;\n }\n\n .pager li a {\n padding: 10px 20px;\n }\n\n .pager.blog-pager {\n margin-top: 40px;\n }\n}\n",""]),e.exports=t},function(e,t,n){var r=n(1),i=n(9);"string"==typeof(i=i.__esModule?i.default:i)&&(i=[[e.i,i,""]]);var o={insert:"head",singleton:!1},a=(r(i,o),i.locals?i.locals:{});e.exports=a},function(e,t,n){(t=n(2)(!1)).push([e.i,"/* Background */ .chroma { background-color: #f8f8f8 }\n/* Other */ .chroma .x { color: #000000 }\n/* Error */ .chroma .err { color: #a40000 }\n/* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; }\n/* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; width: auto; overflow: auto; display: block; }\n/* LineHighlight */ .chroma .hl { display: block; width: 100%;background-color: #ffffcc }\n/* 
LineNumbersTable */ .chroma .lnt { margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f }\n/* LineNumbers */ .chroma .ln { margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: #7f7f7f }\n/* Keyword */ .chroma .k { color: #204a87; font-weight: bold }\n/* KeywordConstant */ .chroma .kc { color: #204a87; font-weight: bold }\n/* KeywordDeclaration */ .chroma .kd { color: #204a87; font-weight: bold }\n/* KeywordNamespace */ .chroma .kn { color: #204a87; font-weight: bold }\n/* KeywordPseudo */ .chroma .kp { color: #204a87; font-weight: bold }\n/* KeywordReserved */ .chroma .kr { color: #204a87; font-weight: bold }\n/* KeywordType */ .chroma .kt { color: #204a87; font-weight: bold }\n/* Name */ .chroma .n { color: #000000 }\n/* NameAttribute */ .chroma .na { color: #c4a000 }\n/* NameBuiltin */ .chroma .nb { color: #204a87 }\n/* NameBuiltinPseudo */ .chroma .bp { color: #3465a4 }\n/* NameClass */ .chroma .nc { color: #000000 }\n/* NameConstant */ .chroma .no { color: #000000 }\n/* NameDecorator */ .chroma .nd { color: #5c35cc; font-weight: bold }\n/* NameEntity */ .chroma .ni { color: #ce5c00 }\n/* NameException */ .chroma .ne { color: #cc0000; font-weight: bold }\n/* NameFunction */ .chroma .nf { color: #000000 }\n/* NameFunctionMagic */ .chroma .fm { color: #000000 }\n/* NameLabel */ .chroma .nl { color: #f57900 }\n/* NameNamespace */ .chroma .nn { color: #000000 }\n/* NameOther */ .chroma .nx { color: #000000 }\n/* NameProperty */ .chroma .py { color: #000000 }\n/* NameTag */ .chroma .nt { color: #204a87; font-weight: bold }\n/* NameVariable */ .chroma .nv { color: #000000 }\n/* NameVariableClass */ .chroma .vc { color: #000000 }\n/* NameVariableGlobal */ .chroma .vg { color: #000000 }\n/* NameVariableInstance */ .chroma .vi { color: #000000 }\n/* NameVariableMagic */ .chroma .vm { color: #000000 }\n/* Literal */ .chroma .l { color: #000000 }\n/* LiteralDate */ .chroma .ld { color: #000000 }\n/* LiteralString */ .chroma .s { color: #4e9a06 }\n/* 
LiteralStringAffix */ .chroma .sa { color: #4e9a06 }\n/* LiteralStringBacktick */ .chroma .sb { color: #4e9a06 }\n/* LiteralStringChar */ .chroma .sc { color: #4e9a06 }\n/* LiteralStringDelimiter */ .chroma .dl { color: #4e9a06 }\n/* LiteralStringDoc */ .chroma .sd { color: #8f5902; font-style: italic }\n/* LiteralStringDouble */ .chroma .s2 { color: #4e9a06 }\n/* LiteralStringEscape */ .chroma .se { color: #4e9a06 }\n/* LiteralStringHeredoc */ .chroma .sh { color: #4e9a06 }\n/* LiteralStringInterpol */ .chroma .si { color: #4e9a06 }\n/* LiteralStringOther */ .chroma .sx { color: #4e9a06 }\n/* LiteralStringRegex */ .chroma .sr { color: #4e9a06 }\n/* LiteralStringSingle */ .chroma .s1 { color: #4e9a06 }\n/* LiteralStringSymbol */ .chroma .ss { color: #4e9a06 }\n/* LiteralNumber */ .chroma .m { color: #0000cf; font-weight: bold }\n/* LiteralNumberBin */ .chroma .mb { color: #0000cf; font-weight: bold }\n/* LiteralNumberFloat */ .chroma .mf { color: #0000cf; font-weight: bold }\n/* LiteralNumberHex */ .chroma .mh { color: #0000cf; font-weight: bold }\n/* LiteralNumberInteger */ .chroma .mi { color: #0000cf; font-weight: bold }\n/* LiteralNumberIntegerLong */ .chroma .il { color: #0000cf; font-weight: bold }\n/* LiteralNumberOct */ .chroma .mo { color: #0000cf; font-weight: bold }\n/* Operator */ .chroma .o { color: #ce5c00; font-weight: bold }\n/* OperatorWord */ .chroma .ow { color: #204a87; font-weight: bold }\n/* Punctuation */ .chroma .p { color: #000000; font-weight: bold }\n/* Comment */ .chroma .c { color: #8f5902; font-style: italic }\n/* CommentHashbang */ .chroma .ch { color: #8f5902; font-style: italic }\n/* CommentMultiline */ .chroma .cm { color: #8f5902; font-style: italic }\n/* CommentSingle */ .chroma .c1 { color: #8f5902; font-style: italic }\n/* CommentSpecial */ .chroma .cs { color: #8f5902; font-style: italic }\n/* CommentPreproc */ .chroma .cp { color: #8f5902; font-style: italic }\n/* CommentPreprocFile */ .chroma .cpf { color: #8f5902; 
font-style: italic }\n/* Generic */ .chroma .g { color: #000000 }\n/* GenericDeleted */ .chroma .gd { color: #a40000 }\n/* GenericEmph */ .chroma .ge { color: #000000; font-style: italic }\n/* GenericError */ .chroma .gr { color: #ef2929 }\n/* GenericHeading */ .chroma .gh { color: #000080; font-weight: bold }\n/* GenericInserted */ .chroma .gi { color: #00a000 }\n/* GenericOutput */ .chroma .go { color: #000000; font-style: italic }\n/* GenericPrompt */ .chroma .gp { color: #8f5902 }\n/* GenericStrong */ .chroma .gs { color: #000000; font-weight: bold }\n/* GenericSubheading */ .chroma .gu { color: #800080; font-weight: bold }\n/* GenericTraceback */ .chroma .gt { color: #a40000; font-weight: bold }\n/* GenericUnderline */ .chroma .gl { color: #000000; text-decoration: underline }\n/* TextWhitespace */ .chroma .w { color: #f8f8f8; text-decoration: underline }\n",""]),e.exports=t},function(e,t){window.onload=function(){var e=document.getElementById("dark-mode-toggle"),t=document.getElementById("dark-mode-theme");function n(n){localStorage.setItem("dark-mode-storage",n),"dark"===n?(t.disabled=!1,e.className="fas fa-sun"):"light"===n&&(t.disabled=!0,e.className="fas fa-moon")}window.matchMedia("(prefers-color-scheme: dark)").matches?n(localStorage.getItem("dark-mode-storage")||"dark"):n(localStorage.getItem("dark-mode-storage")||"light"),e.addEventListener("click",()=>{"fas fa-moon"===e.className?n("dark"):"fas fa-sun"===e.className&&n("light")})}}]); \ No newline at end of file diff --git a/blog/themes/harbor/static/js/instantpage.min.js b/blog/themes/harbor/static/js/instantpage.min.js new file mode 100644 index 0000000..4a08db3 --- /dev/null +++ b/blog/themes/harbor/static/js/instantpage.min.js @@ -0,0 +1,2 @@ +/*! 
instant.page v5.1.0 - (C) 2019-2020 Alexandre Dieulot - https://instant.page/license */ +let t,e;const n=new Set,o=document.createElement("link"),i=o.relList&&o.relList.supports&&o.relList.supports("prefetch")&&window.IntersectionObserver&&"isIntersecting"in IntersectionObserverEntry.prototype,s="instantAllowQueryString"in document.body.dataset,a="instantAllowExternalLinks"in document.body.dataset,r="instantWhitelist"in document.body.dataset,c="instantMousedownShortcut"in document.body.dataset,d=1111;let l=65,u=!1,f=!1,m=!1;if("instantIntensity"in document.body.dataset){const t=document.body.dataset.instantIntensity;if("mousedown"==t.substr(0,"mousedown".length))u=!0,"mousedown-only"==t&&(f=!0);else if("viewport"==t.substr(0,"viewport".length))navigator.connection&&(navigator.connection.saveData||navigator.connection.effectiveType&&navigator.connection.effectiveType.includes("2g"))||("viewport"==t?document.documentElement.clientWidth*document.documentElement.clientHeight<45e4&&(m=!0):"viewport-all"==t&&(m=!0));else{const e=parseInt(t);isNaN(e)||(l=e)}}if(i){const n={capture:!0,passive:!0};if(f||document.addEventListener("touchstart",function(t){e=performance.now();const n=t.target.closest("a");if(!h(n))return;v(n.href)},n),u?c||document.addEventListener("mousedown",function(t){const e=t.target.closest("a");if(!h(e))return;v(e.href)},n):document.addEventListener("mouseover",function(n){if(performance.now()-e{v(o.href),t=void 0},l)},n),c&&document.addEventListener("mousedown",function(t){if(performance.now()-e1||t.metaKey||t.ctrlKey)return;if(!n)return;n.addEventListener("click",function(t){1337!=t.detail&&t.preventDefault()},{capture:!0,passive:!1,once:!0});const o=new MouseEvent("click",{view:window,bubbles:!0,cancelable:!1,detail:1337});n.dispatchEvent(o)},n),m){let t;(t=window.requestIdleCallback?t=>{requestIdleCallback(t,{timeout:1500})}:t=>{t()})(()=>{const t=new IntersectionObserver(e=>{e.forEach(e=>{if(e.isIntersecting){const 
n=e.target;t.unobserve(n),v(n.href)}})});document.querySelectorAll("a").forEach(e=>{h(e)&&t.observe(e)})})}}function p(e){e.relatedTarget&&e.target.closest("a")==e.relatedTarget.closest("a")||t&&(clearTimeout(t),t=void 0)}function h(t){if(t&&t.href&&(!r||"instant"in t.dataset)&&(a||t.origin==location.origin||"instant"in t.dataset)&&["http:","https:"].includes(t.protocol)&&("http:"!=t.protocol||"https:"!=location.protocol)&&(s||!t.search||"instant"in t.dataset)&&!(t.hash&&t.pathname+t.search==location.pathname+location.search||"noInstant"in t.dataset))return!0}function v(t){if(n.has(t))return;const e=document.createElement("link");e.rel="prefetch",e.href=t,document.head.appendChild(e),n.add(t)} \ No newline at end of file diff --git a/blog/themes/harbor/static/src/main.js b/blog/themes/harbor/static/src/main.js new file mode 100644 index 0000000..d06e500 --- /dev/null +++ b/blog/themes/harbor/static/src/main.js @@ -0,0 +1,153 @@ +import lunr, { Token, utils } from 'lunr' +import Mark from 'mark.js' +import '../css/main.css' +import '../css/syntax.css' + +let lunrIndex +let lunrResult +let pagesIndex + +const bigramTokeniser = (obj, metadata) => { + if (obj == null || obj == undefined) { + return [] + } + + let str = obj.toString().trim().toLowerCase() + let tokens = [] + + for (let i = 0; i <= str.length - 2; i++) { + let tokenMetadata = utils.clone(metadata) || {} + tokenMetadata['position'] = [i, i + 2] + tokenMetadata['index'] = tokens.length + tokens.push(new Token(str.slice(i, i + 2), tokenMetadata)) + } + + return tokens +} + +const queryNgramSeparator = (query) => { + const str = query.toString().trim().toLowerCase() + const tokens = [] + + for (let i = 0; i <= str.length - 2; i++) { + tokens.push(str.slice(i, i + 2)) + } + + return tokens.join(' ') +} + +const index = '../post/index.json' + +const initLunr = () => { + let request = new XMLHttpRequest() + request.open('GET', index, true) + request.onload = function () { + if (this.status >= 200 && this.status 
/**
 * Searching pages using lunr.
 * Runs the bigram-separated query against the lunr index and resolves each
 * hit back to its page record from `pagesIndex`.
 *
 * Side effect: stores the raw lunr hits in the module-level `lunrResult`
 * so renderResults() can later read match positions from it.
 *
 * @param {String} query Query string for searching
 * @return {Object[]} Array of matching page records, in lunr's ranking order
 */
const search = (query) => {
  lunrResult = lunrIndex.search(queryNgramSeparator(query))
  // find() stops at the first matching page instead of scanning the whole
  // index with filter(...)[0]; both yield undefined when no page matches
  return lunrResult.map((result) =>
    pagesIndex.find((page) => page.ref === result.ref)
  )
}
/**
 * Rendering search results.
 * Clears #searchResults, then renders up to MAX_PAGES result entries with a
 * highlighted snippet of the body centred on the first match.
 *
 * @param {Object[]} results Array of search results (page records from search())
 */
const renderResults = (results) => {
  const searchResults = document.querySelector('#searchResults')
  const query = document.querySelector('#searchBox').value
  const BODY_LENGTH = 100
  const MAX_PAGES = 10

  // Clear previous search results
  while (searchResults.firstChild) {
    searchResults.removeChild(searchResults.firstChild)
  }

  // Show message when results is empty
  if (!results.length) {
    const resultPage = document.createElement('div')
    resultPage.className = 'searchResultPage'
    // textContent, not innerHTML: the query is user input and must not be
    // interpreted as markup (DOM XSS)
    resultPage.textContent = `No results found for query "${query}"`
    searchResults.append(resultPage)
    return
  }

  // Only show the first MAX_PAGES results
  results.slice(0, MAX_PAGES).forEach((result, idx) => {
    const resultPage = document.createElement('div')
    resultPage.className = 'searchResultPage'
    // Position of the first body match, used to centre the snippet
    const metadata = lunrResult[idx].matchData.metadata
    const matchPosition = metadata[Object.keys(metadata)[0]].body.position[0][0]
    const bodyStartPosition = Math.max(matchPosition - BODY_LENGTH / 2, 0)

    const resultTitle = document.createElement('a')
    resultTitle.className = 'searchResultTitle'
    resultTitle.href = result.ref
    // NOTE(review): title/body come from the site's own index.json, so
    // innerHTML is kept; confirm the index never contains untrusted markup
    resultTitle.innerHTML = result.title
    resultPage.append(resultTitle)

    const resultBody = document.createElement('div')
    resultBody.className = 'searchResultBody'
    // slice(start, end) replaces the deprecated substr(start, length)
    resultBody.innerHTML = result.body.slice(
      bodyStartPosition,
      bodyStartPosition + BODY_LENGTH
    )
    resultPage.append(resultBody)
    searchResults.append(resultPage)
  })

  // Highlight the query once, after all results are in the DOM, instead of
  // re-marking the whole container on every loop iteration
  new Mark(searchResults).mark(query)
}
// Dark-mode toggle: applies the saved theme (falling back to the OS colour
// scheme) on load, and flips it when the toggle icon is clicked.
// addEventListener instead of `window.onload =` so other load handlers
// registered elsewhere are not clobbered.
window.addEventListener('load', function () {
  const toggle = document.getElementById('dark-mode-toggle')
  const darkTheme = document.getElementById('dark-mode-theme')

  // Bail out on pages that don't render the toggle or the dark stylesheet
  // instead of throwing a TypeError below
  if (!toggle || !darkTheme) {
    return
  }

  // A previously saved preference wins; otherwise follow the OS preference
  const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches
  setTheme(
    localStorage.getItem('dark-mode-storage') || (prefersDark ? 'dark' : 'light')
  )

  toggle.addEventListener('click', () => {
    // The icon class doubles as state: moon shown means we are in light mode
    if (toggle.className === 'fas fa-moon') {
      setTheme('dark')
    } else if (toggle.className === 'fas fa-sun') {
      setTheme('light')
    }
  })

  /**
   * Persists the chosen theme and applies it by toggling the dark stylesheet.
   * @param {('dark'|'light')} mode
   */
  function setTheme(mode) {
    localStorage.setItem('dark-mode-storage', mode)
    if (mode === 'dark') {
      darkTheme.disabled = false
      toggle.className = 'fas fa-sun'
    } else if (mode === 'light') {
      darkTheme.disabled = true
      toggle.className = 'fas fa-moon'
    }
  }
})
"minimal", "simple"] +features = ["minimal", "simple", "responsive", "google analytics integration", "syntax highlight"] +min_version = "0.60" + +[author] + name = "matsuyoshi30" + homepage = "https://matsuyoshi30.net" \ No newline at end of file diff --git a/blog/themes/harbor/webpack.common.js b/blog/themes/harbor/webpack.common.js new file mode 100644 index 0000000..feb147c --- /dev/null +++ b/blog/themes/harbor/webpack.common.js @@ -0,0 +1,23 @@ +const path = require('path') + +module.exports = { + entry: {"main.js" : ['./static/src/main.js', './static/src/theme.js']}, + output: { + path: `${__dirname}/static/js/`, + filename: 'bundle.js' + }, + module: { + rules: [ + { + test: /\.css/, + use: [ + "style-loader", + { + loader: "css-loader", + options: { url: false } + } + ] + } + ] + } +}; \ No newline at end of file diff --git a/blog/themes/harbor/webpack.development.js b/blog/themes/harbor/webpack.development.js new file mode 100644 index 0000000..30a8386 --- /dev/null +++ b/blog/themes/harbor/webpack.development.js @@ -0,0 +1,11 @@ +const merge = require('webpack-merge') +const common = require('./webpack.common.js') + +module.exports = merge(common, { + mode: 'development', + devtool: 'source-map', + output: { + path: `${__dirname}/static/js/`, + filename: 'bundle.js' + } + }) \ No newline at end of file diff --git a/blog/themes/harbor/webpack.production.js b/blog/themes/harbor/webpack.production.js new file mode 100644 index 0000000..c51b9ff --- /dev/null +++ b/blog/themes/harbor/webpack.production.js @@ -0,0 +1,10 @@ +const merge = require('webpack-merge') +const common = require('./webpack.common.js') + +module.exports = merge(common, { + mode: 'production', + output: { + path: `${__dirname}/static/js/`, + filename: 'bundle.js' + } +}) \ No newline at end of file diff --git a/blog/themes/theme/.all-contributorsrc b/blog/themes/theme/.all-contributorsrc new file mode 100644 index 0000000..f57352a --- /dev/null +++ b/blog/themes/theme/.all-contributorsrc 
@@ -0,0 +1,215 @@ +{ + "files": [ + "README.md" + ], + "imageSize": 100, + "commit": false, + "contributors": [ + { + "login": "jakewies", + "name": "Jake Wiesler", + "avatar_url": "https://avatars1.githubusercontent.com/u/12075916?v=4", + "profile": "https://www.jakewiesler.com", + "contributions": [ + "code", + "design", + "doc" + ] + }, + { + "login": "chuxinh", + "name": "Chuxin Huang", + "avatar_url": "https://avatars2.githubusercontent.com/u/30974572?v=4", + "profile": "https://www.chuxinhuang.com/", + "contributions": [ + "doc", + "code", + "design" + ] + }, + { + "login": "kentnek", + "name": "Kent", + "avatar_url": "https://avatars1.githubusercontent.com/u/7024160?v=4", + "profile": "https://kentnek.com", + "contributions": [ + "code", + "doc", + "design" + ] + }, + { + "login": "somaniarushi", + "name": "Arushi Somani", + "avatar_url": "https://avatars3.githubusercontent.com/u/54224195?v=4", + "profile": "https://github.com/somaniarushi", + "contributions": [ + "doc" + ] + }, + { + "login": "xvallspl", + "name": "Xavier Valls", + "avatar_url": "https://avatars0.githubusercontent.com/u/867299?v=4", + "profile": "https://github.com/xvallspl", + "contributions": [ + "doc", + "code", + "design" + ] + }, + { + "login": "pyvain", + "name": "Pyvain", + "avatar_url": "https://avatars3.githubusercontent.com/u/2924494?v=4", + "profile": "https://github.com/pyvain", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "jlebar", + "name": "Justin Lebar", + "avatar_url": "https://avatars1.githubusercontent.com/u/150663?v=4", + "profile": "http://jlebar.com", + "contributions": [ + "code" + ] + }, + { + "login": "aareet", + "name": "Aareet Shermon", + "avatar_url": "https://avatars1.githubusercontent.com/u/33654?v=4", + "profile": "https://www.aareet.com", + "contributions": [ + "design" + ] + }, + { + "login": "dgnicholson", + "name": "dgnicholson", + "avatar_url": "https://avatars1.githubusercontent.com/u/6208288?v=4", + "profile": 
"https://github.com/dgnicholson", + "contributions": [ + "design", + "code" + ] + }, + { + "login": "msfjarvis", + "name": "Harsh Shandilya", + "avatar_url": "https://avatars0.githubusercontent.com/u/13348378?v=4", + "profile": "https://msfjarvis.dev", + "contributions": [ + "code" + ] + }, + { + "login": "ProfessorLogout", + "name": "Marco Kamner", + "avatar_url": "https://avatars3.githubusercontent.com/u/13572444?v=4", + "profile": "https://twitter.com/ProfessorLogout", + "contributions": [ + "code" + ] + }, + { + "login": "ewenme", + "name": "ewen", + "avatar_url": "https://avatars3.githubusercontent.com/u/10872821?v=4", + "profile": "https://ewen.io/", + "contributions": [ + "code" + ] + }, + { + "login": "SanchithHegde", + "name": "Sanchith Hegde", + "avatar_url": "https://avatars2.githubusercontent.com/u/22217505?v=4", + "profile": "https://github.com/SanchithHegde", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "CER10TY", + "name": "Sören Johanson", + "avatar_url": "https://avatars1.githubusercontent.com/u/5760400?v=4", + "profile": "https://soeren.codes", + "contributions": [ + "code" + ] + }, + { + "login": "jdl031", + "name": "James Lloyd", + "avatar_url": "https://avatars3.githubusercontent.com/u/1720477?v=4", + "profile": "https://github.com/jdl031", + "contributions": [ + "code" + ] + }, + { + "login": "wilsonehusin", + "name": "Wilson E. 
Husin", + "avatar_url": "https://avatars1.githubusercontent.com/u/14004487?v=4", + "profile": "http://wilsonehusin.com", + "contributions": [ + "code" + ] + }, + { + "login": "tommorris", + "name": "Tom Morris", + "avatar_url": "https://avatars0.githubusercontent.com/u/175?v=4", + "profile": "https://tommorris.org/", + "contributions": [ + "code" + ] + }, + { + "login": "sdil", + "name": "Mohamad Fadhil", + "avatar_url": "https://avatars0.githubusercontent.com/u/461537?v=4", + "profile": "http://twitter.com/sdil", + "contributions": [ + "code" + ] + }, + { + "login": "skvale", + "name": "Sam Kvale", + "avatar_url": "https://avatars0.githubusercontent.com/u/5314713?v=4", + "profile": "https://github.com/skvale", + "contributions": [ + "code" + ] + }, + { + "login": "macxcool", + "name": "Mark C", + "avatar_url": "https://avatars.githubusercontent.com/u/2531654?v=4", + "profile": "https://github.com/macxcool", + "contributions": [ + "code" + ] + }, + { + "login": "rbnis", + "name": "Robin", + "avatar_url": "https://avatars.githubusercontent.com/u/5955614?v=4", + "profile": "https://rbn.is", + "contributions": [ + "code" + ] + } + ], + "contributorsPerLine": 7, + "projectName": "hugo-theme-codex", + "projectOwner": "jakewies", + "repoType": "github", + "repoHost": "https://github.com", + "skipCi": true +} diff --git a/blog/themes/theme/.gitignore b/blog/themes/theme/.gitignore new file mode 100644 index 0000000..f0401df --- /dev/null +++ b/blog/themes/theme/.gitignore @@ -0,0 +1,11 @@ +node_modules + +# OS +.DS_Store +Thumbs.db + +# IDEs +.vscode + +# Hugo +public/ diff --git a/blog/themes/theme/.prettierignore b/blog/themes/theme/.prettierignore new file mode 100644 index 0000000..2cff650 --- /dev/null +++ b/blog/themes/theme/.prettierignore @@ -0,0 +1,11 @@ +exampleSite/content +layouts +static + +package.json +public +node_modules + +.DS_Store +logs +*.log diff --git a/blog/themes/theme/CONTRIBUTING.md b/blog/themes/theme/CONTRIBUTING.md new file mode 100644 index 
0000000..27976c4 --- /dev/null +++ b/blog/themes/theme/CONTRIBUTING.md @@ -0,0 +1,48 @@ +# Contributing + +First off, thank you for your time and effort! This project is not very large +and easy to jump into. + +## Getting started + +1. Fork and clone the repository +2. Install dependencies: + +```bash +cd hugo-theme-codex + +yarn install +``` + +## Scripts + +```bash +yarn develop +``` + +Triggers a one-time build of the static directory. + +```bash +yarn format +``` + +Runs prettier on the entire project directory. + +## Assets + +The `assets/` directory is where you can write JS and SCSS, which get processed +into CSS files before being placed in the `static/` directory. + +### Making `scss` changes + +If you make a change to a source `scss` file in `assets/scss`, you will need to make sure that you rebuild the `exampleSite/resources/_gen/assets/` directory to reflect the change in the demo site, else the demo's styles will become stale. + +If you are running `yarn develop`, `hugo` will pick up these changes by default. You can also run `yarn build:example`, which will trigger a one-time rebuild of the example site. From there, commit the updated `resources/_gen` directory to version control. + +## Questions + +If you have any questions feel free to reach out to me directly. 
Best ways to +contact me: + +- Twitter: https://twitter.com/jakewies +- Email: jakewiesler@gmail.com diff --git a/blog/themes/theme/LICENSE.md b/blog/themes/theme/LICENSE.md new file mode 100644 index 0000000..faff36e --- /dev/null +++ b/blog/themes/theme/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2020 YOUR_NAME_HERE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/blog/themes/theme/README.md b/blog/themes/theme/README.md new file mode 100644 index 0000000..da36e03 --- /dev/null +++ b/blog/themes/theme/README.md @@ -0,0 +1,268 @@ +## Codex + +A minimal blog theme built for [Hugo](https://gohugo.io/) 🍜 + +![Hugo desktop screenshot](/images/screenshot.png) + +- An about page 👋🏻 and a blog 📝 +- Blog posts can be tagged 🏷 +- Mathematical notations are supported with KaTex 📐 +- Sass/SCSS for styling ✨ +- Support for Google Analytics 📈 and Disqus 💬 +- i18n support + +### Prerequisites + +Hugo **extended version** (for Sass/SCSS support). 
+ +For macOS users, the extended version is installed by default if you use `homebrew`. + +For Windows users, you can install with `choco`: +``` +choco install hugo-extended -confirm +``` + +### Getting started + +At the root of your Hugo project, run: + +```bash +git submodule add https://github.com/jakewies/hugo-theme-codex.git themes/hugo-theme-codex +``` + +Next, copy the contents of the [`exampleSite/config.toml`](https://github.com/jakewies/hugo-theme-codex/blob/master/exampleSite/config.toml) to your site's `config.toml`. Make sure to read all the comments, as there a few nuances with Hugo themes that require some changes to that file. + +The most important change you will need to make to the `config.toml` is removing [this line](https://github.com/jakewies/hugo-theme-codex/blob/master/exampleSite/config.toml#L2): + +``` +themesDir = "../../" +``` + +It only exists in the example site so that the demo can function properly. + +Finally, run: + +``` +hugo server -D +``` + +**Note: If you are seeing a blank page it is probably because you have nothing in your `content/` directory. Read on to fix that.** + +### Configuring the Home Page + +The site's home page can be configured by creating a `content/_index.md` file. This file can use the following frontmatter: + +```md +--- +heading: "Hi, I'm Codex" +subheading: "A minimal blog theme for hugo." +handle: "hugo-theme-codex" +--- +``` + +If you would rather override the about page's layout with your own, you can do so by creating a `layouts/index.html`. You can find the `index.html` file that `hugo-theme-codex` uses [here](https://github.com/jakewies/hugo-theme-codex/blob/master/layouts/index.html). + +### Configuring Social Icons + +Social Icons are optional. To show any of these icons, just provide the value in the `[params]` section of `config.toml`. + +```toml +# config.toml + +[params] + twitter = "https://twitter.com/GoHugoIO" + github = "https://github.com/jakewies/hugo-theme-codex" + # ... 
+ + iconOrder = ["Twitter", "GitHub"] +``` + +If any of these options are given, `hugo-theme-codex` will render the social icon in the footer, using the order specified in `iconOrder`. + +See the contents of the [example site](https://github.com/jakewies/hugo-theme-codex/tree/master/exampleSite) for more details. + +You can also create additional social icons by: +1. Adding your own SVGs in `static/svg/`, for example `static/svg/reddit.svg`. +2. Modifying your site's config as follows: + ```toml + [params] + # ... + reddit = "" + + iconOrder = ["Reddit"] + ``` + +Make sure that the icon title must match the icon's file name. If the title contains more than one word, say "My Awesome Site", +you can use dash "-" for the icon name: `my-awesome-site.svg`. + +### Creating a blog post + +You can create a new blog post page by going to the root of your project and typing: + +``` +hugo new blog/:blog-post.md +``` + +Where `:blog-post.md` is the name of the file of your new post. + +This will execute the theme's `blog` archetype to create a new markdown file in `contents/blog/:blog-post.md` with the following frontmatter: + +```md +# Default post frontmatter: + +# The title of your post. Default value is generated +# From the markdown filename +title: "{{ replace .TranslationBaseName "-" " " | title }}" +# The date the post was created +date: {{ .Date }} +# The post filename +slug: "" +# Post description used for seo +description: "" +# Post keywords used for seo +keywords: [] +# If true, the blog post will not be included in static build +draft: true +# Categorize your post with tags +tags: [] +# Uses math typesetting +math: false +# Includes a table of contents on screens >1024px +toc: false +``` + +The frontmatter above is the default for a new post, but all values can be changed. + +### Configuring Table of Contents in blog posts + +To display post title in Table of Contents in blog posts, set `showPageTitleInTOC` +to `true` in the `[params]` section of `config.toml`. 
+ +```toml +# config.toml + +[params] + # ... + showPageTitleInTOC = true +``` + +### Adding a new section menu + +In your site's `config.toml`, add a new menu definition for say, "photos": +```toml +# config.toml + +[[menu.main]] + identifier = "photos" + name = "photos" + title = "Photos" + url = "/photos" +``` + +Then, put your posts under "content/photos". + +### Custom styling + +You have two options for custom styling. The first is to create an `assets/scss/custom.scss` in your project and put your custom styling there. For example, the snippet below changes the dot's color on your About page to blue: + +```scss +// custom.scss +.fancy { + color: #1e88e5; +} +``` + +You can even use Hugo variables/params in your custom styles too! + +```scss +// custom.scss +.fancy { + color: {{ .Site.Params.colors.fancy | default "#1e88e5" }} +} +``` + +```toml +# config.toml +[params.colors] + fancy = "#f06292" +``` + +The second option is to use the supported scss overrides. You can do this by creating an `assets/scss/overrides/scss` file in your project. The following overrides are supported: + +```scss +// overrides.scss + +// The primary accent color used throughout the site +$primary: '' +``` + +### Tags + +Right now `hugo-theme-codex` uses the `tags` taxonomy for blog posts. You can view all the blog posts of a given tag by going to `/tags/:tag-name`, where `:tag-name` is the name of your tag. + +### i18n + +Support for [`i18n`](https://gohugo.io/functions/i18n/#readout) is currently available for the following languages: + +- English +- German + +If you would like to have another language supported, create a post in the [Discussions](https://github.com/jakewies/hugo-theme-codex/discussions) section of the repository. You may also support your language of choice by creating a `i18n/` directory in your project with a `.toml` file named after the language you are supporting. + +There are not many UI-related strings to override in this theme. 
If you are looking to support a language of your own, refer to [the `i18n/en.toml` file](https://github.com/jakewies/hugo-theme-codex/blob/a7800567242b6c7d3b4bd8b36dd329c3232faf5a/i18n/en.toml) to see which strings can be overridden. + +### Favicon + +To update favicon of the site, replace the one in `static/favicon.ico` with your own. + +## Contributing + +Check out the [CONTRIBUTORS.md file](https://github.com/jakewies/hugo-theme-codex/blob/master/CONTRIBUTING.md) for more info on how you can contribute! + +## Contributors ✨ + + +[![All Contributors](https://img.shields.io/badge/all_contributors-21-orange.svg?style=flat-square)](#contributors-) + + +Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Jake Wiesler

💻 🎨 📖

Chuxin Huang

📖 💻 🎨

Kent

💻 📖 🎨

Arushi Somani

📖

Xavier Valls

📖 💻 🎨

Pyvain

💻 📖

Justin Lebar

💻

Aareet Shermon

🎨

dgnicholson

🎨 💻

Harsh Shandilya

💻

Marco Kamner

💻

ewen

💻

Sanchith Hegde

💻 📖

Sören Johanson

💻

James Lloyd

💻

Wilson E. Husin

💻

Tom Morris

💻

Mohamad Fadhil

💻

Sam Kvale

💻

Mark C

💻

Robin

💻
+ + + + + + +This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! diff --git a/blog/themes/theme/archetypes/blog.md b/blog/themes/theme/archetypes/blog.md new file mode 100644 index 0000000..c5a2205 --- /dev/null +++ b/blog/themes/theme/archetypes/blog.md @@ -0,0 +1,11 @@ +--- +title: "{{ replace .TranslationBaseName "-" " " | title }}" +date: {{ .Date }} +slug: "" +description: "" +keywords: [] +draft: true +tags: [] +math: false +toc: false +--- diff --git a/blog/themes/theme/archetypes/default.md b/blog/themes/theme/archetypes/default.md new file mode 100644 index 0000000..16791bf --- /dev/null +++ b/blog/themes/theme/archetypes/default.md @@ -0,0 +1,7 @@ +--- +title: "{{ replace .TranslationBaseName "-" " " | title }}" +description: "" +date: {{ .Date }} +keywords: [] +draft: true +--- diff --git a/blog/themes/theme/assets/js/index.js b/blog/themes/theme/assets/js/index.js new file mode 100644 index 0000000..66edd2e --- /dev/null +++ b/blog/themes/theme/assets/js/index.js @@ -0,0 +1,19 @@ +/* + * Handles mobile nav + */ + +function toggleMobileNavState() { + const body = document.querySelector("body"); + body.classList.toggle("nav--active"); +} + +/* + * Initializes burger functionality + */ + +function initBurger() { + const burger = document.querySelector(".burger"); + burger.addEventListener("click", toggleMobileNavState); +} + +initBurger(); diff --git a/blog/themes/theme/assets/scss/_main.scss b/blog/themes/theme/assets/scss/_main.scss new file mode 100644 index 0000000..971f59a --- /dev/null +++ b/blog/themes/theme/assets/scss/_main.scss @@ -0,0 +1,22 @@ +@import "partials/vars"; +@import "partials/normalize"; +@import "partials/reset"; +@import "partials/typography"; +@import "partials/nav"; +@import "partials/social-icons"; + +body.nav--active { + overflow: hidden; +} + +main { + padding: 3rem 1.5rem 1rem; + + @media screen and (min-width: $medium) { + 
padding-left: calc(1.5rem + #{$navWidth}); + } + + @media screen and (max-width: $medium - 1) { + padding-top: calc(3rem + #{$burgerContainerHeight}); + } +} diff --git a/blog/themes/theme/assets/scss/custom.scss b/blog/themes/theme/assets/scss/custom.scss new file mode 100644 index 0000000..e69de29 diff --git a/blog/themes/theme/assets/scss/overrides.scss b/blog/themes/theme/assets/scss/overrides.scss new file mode 100644 index 0000000..cd50bf5 --- /dev/null +++ b/blog/themes/theme/assets/scss/overrides.scss @@ -0,0 +1,4 @@ +// The following variables can be overridden + +// The primary accent color can take any css color property, including hex, named props, rgba etc. +// $primary: diff --git a/blog/themes/theme/assets/scss/pages/about.scss b/blog/themes/theme/assets/scss/pages/about.scss new file mode 100644 index 0000000..df4b0ca6 --- /dev/null +++ b/blog/themes/theme/assets/scss/pages/about.scss @@ -0,0 +1,60 @@ +@import "../main"; + +.splash-container { + height: 100%; + display: flex; + justify-content: center; + align-items: center; + font-size: 14px; + + @media screen and (min-width: $medium) { + font-size: 18px; + } +} + +.splash { + h1 { + font-size: 3em; + line-height: 1; + letter-spacing: -0.03em; + margin: 0; + } + + h2 { + font-size: 2.25em; + font-weight: 500; + line-height: 1.25; + max-width: 22em; + letter-spacing: -0.03em; + } +} + +.fancy { + color: $primary; +} + +.handle { + display: inline-block; + margin-top: 0.275em; + color: $grey; + letter-spacing: 0.5px; +} + +.writing { + text-decoration: none; + color: $primary; +} + +/* overrides */ + +main { + padding-top: 0; + padding-bottom: 0; + height: 100%; +} + +.social-icons { + justify-content: flex-start; + padding-top: 1rem; + margin-left: -0.8rem; // offset to negate icon's padding to align with text above +} diff --git a/blog/themes/theme/assets/scss/pages/post.scss b/blog/themes/theme/assets/scss/pages/post.scss new file mode 100644 index 0000000..c33c172 --- /dev/null +++ 
b/blog/themes/theme/assets/scss/pages/post.scss @@ -0,0 +1,241 @@ +@import "../main"; +@import "../partials/github-syntax-highlighting"; + +$tocBreakpoint: 1024px; + +/* Aligns ToC content */ +.flex-wrapper { + display: flex; +} + +.post__container { + flex-grow: 1; + min-width: 0; +} + +.post { + width: 100%; + max-width: 88rem; + margin: 0 auto; + + h2, + h3 { + position: relative; + padding-top: 10px; + + .anchor { + top: 0.5rem; + text-decoration: none; + position: absolute; + left: -1rem; + color: $grey; + font-size: 1.2rem; + font-weight: 400; + } + + .anchor:hover { + color: $darkGrey; + } + } + + blockquote { + width: 95%; + margin: 0 auto; + font-size: 1rem; + + a { + color: $darkGrey; + text-decoration: underline; + } + } + + img { + width: 100%; + max-width: 1420px; + margin: 0 auto; + display: block; + } +} + +#post__title { + margin-top: 0; + margin-bottom: 0.5rem; +} + +.post__date { + color: $grey; + font-size: 0.8rem; +} + +.post__footer { + padding-top: 3rem; +} + +.toc-container { + position: sticky; + align-self: start; + top: 3rem; + max-width: 350px; + + @media screen and (max-width: $tocBreakpoint) { + display: none; + } +} + +.toc-post-title { + font-size: 0.9rem; + margin-bottom: 0.8rem; +} + +#TableOfContents { + ul { + list-style: none; + margin: 0; + } + + a { + text-decoration: none; + color: #9b9b9b; + font-size: 0.9rem; + } + + a.active { + color: rgb(51, 51, 51); + } +} + +.tags__list { + padding-right: 1.5rem; + margin: 1.5rem 0 0; + list-style: none; + display: flex; + justify-content: flex-end; +} + +.tag__item { + margin-right: 1rem; + display: inline-block; + + &:last-child { + margin-right: 0; + } +} + +.tag__link { + display: inline-block; + text-decoration: none; + padding: 0.2em 0.4em; + border-radius: 3px; + background: lighten($primary, 41%); + color: $primary; + font-size: 0.8rem; + + &:hover { + background: lighten($primary, 38%); + } +} + +.gif { + margin-top: 1.5rem; + + img { + max-width: 375px; + } +} + +.pagination { 
+ display: flex; + flex-direction: column; + margin-top: 1.5rem; + + @media screen and (min-width: 600px) { + flex-direction: row; + justify-content: space-between; + } +} + +.pagination__item { + text-decoration: none; + display: flex; + flex-direction: column; + + &:nth-child(2) { + margin-top: 1.5rem; + } + + @media screen and (min-width: 600px) { + width: 275px; + padding: 15px; + border-radius: 4px; + &:first-of-type { + padding-right: 15px; + } + &:last-of-type { + margin-top: 0; + } + &:hover { + background-color: #f6f9fc; + } + } +} + +.pagination__label { + color: $grey; + font-size: 0.8rem; +} + +.pagination__title { + color: $black; + font-weight: 700; + margin-top: 0.25rem; +} + +footer { + text-align: center; + padding: 0 1.5rem; + background: $white; + + p { + margin-top: 1rem; // reduce margin top due to social icons' padding + color: $grey; + font-size: 0.65rem; + } +} + +/* overrides */ +.post__content { + ul { + list-style: none; + + li { + margin-bottom: 0.5rem; + + &::before { + content: "-"; + color: $darkGrey; + position: absolute; + margin-left: -15px; + } + } + } +} + +.twitter-tweet.twitter-tweet-rendered { + margin: 1.5rem auto !important; + width: 375px !important; +} + +table { + max-width: 100%; + border-spacing: 0; + + thead { + background: $lightGrey; + } + + th, + td { + padding: 0.5em 1em; + border: 1px double $greyTableBorder; + } +} diff --git a/blog/themes/theme/assets/scss/pages/posts.scss b/blog/themes/theme/assets/scss/pages/posts.scss new file mode 100644 index 0000000..4a89154 --- /dev/null +++ b/blog/themes/theme/assets/scss/pages/posts.scss @@ -0,0 +1,57 @@ +@import "../main"; +@import "../partials/post-list"; + +.tags__list { + list-style: none; + margin: 0; + padding: 0 0 0 50px; + flex-shrink: 0; + + @media screen and (max-width: $medium - 1) { + display: none; + } +} + +.post__header .tags__list { + display: none; + padding-left: 0; + + @media screen and (max-width: $medium - 1) { + display: block; + } + + .tag__item { 
+ display: inline-block; + margin-right: 10px; + + &:last-child { + margin-right: 0; + } + } + + .tag__link { + font-size: 0.8rem; + } +} + +.tag__link { + text-decoration: none; + color: $grey; + font-size: 0.9rem; + + &::before { + content: "#"; + font-size: 0.7rem; + padding-right: 1px; + } + + &:hover { + color: $darkGrey; + } +} + +/* page overrides */ +.post-list__container { + display: flex; + justify-content: space-between; +} diff --git a/blog/themes/theme/assets/scss/pages/tags.scss b/blog/themes/theme/assets/scss/pages/tags.scss new file mode 100644 index 0000000..3cc181f --- /dev/null +++ b/blog/themes/theme/assets/scss/pages/tags.scss @@ -0,0 +1,28 @@ +@import "../main"; +@import "../partials/post-list"; + +.tag__header { + align-items: baseline; + display: flex; + margin: 0 auto 3rem; + + a, + .separator { + color: $grey; + font-size: 1.5rem; + } + + a { + text-decoration: none; + } + + .separator { + align-self: center; + margin: 0 5px; + } + + .tag__term { + margin: 0; + font-weight: 600; + } +} diff --git a/blog/themes/theme/assets/scss/partials/_burger.scss b/blog/themes/theme/assets/scss/partials/_burger.scss new file mode 100644 index 0000000..07d4370 --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_burger.scss @@ -0,0 +1,55 @@ +.burger__container { + height: $burgerContainerHeight; + display: flex; + align-items: center; + padding: 0 1.5rem; + position: fixed; + width: 100%; + background: $white; + z-index: 2; + + @media screen and (min-width: $medium) { + display: none; + } +} + +.burger { + position: relative; + width: $meatWidth; + height: $meatWidth; + cursor: pointer; +} + +.burger__meat { + position: absolute; + width: $meatWidth; + height: $meatHeight; + background: $black; + top: calc(50% - #{$meatHeight} / 2); + left: calc(50% - #{$meatWidth} / 2); + transition: all 150ms ease-in; +} + +.burger__meat--1 { + transform: translateY(-10px); +} + +.burger__meat--2 { + width: calc(#{$meatWidth} - 6px); +} + +.burger__meat--3 { + 
transform: translateY(10px); +} + +.nav--active .burger__meat--1 { + transform: rotate(45deg); +} + +.nav--active .burger__meat--2 { + opacity: 0; +} + +.nav--active .burger__meat--3 { + transform: rotate(-45deg); +} diff --git a/blog/themes/theme/assets/scss/partials/_github-syntax-highlighting.scss b/blog/themes/theme/assets/scss/partials/_github-syntax-highlighting.scss new file mode 100644 index 0000000..c61ab55 --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_github-syntax-highlighting.scss @@ -0,0 +1,108 @@ +code[class*="language-"], +pre[class*="language-"] { + color: #24292e; + -moz-tab-size: 4; + -o-tab-size: 4; + tab-size: 4; + + -webkit-hyphens: none; + -moz-hyphens: none; + -ms-hyphens: none; + hyphens: none; +} + +.token.comment, +.token.prolog, +.token.doctype, +.token.cdata, +.token.plain-text { + color: #6a737d; +} + +.token.atrule, +.token.attr-value, +.token.keyword, +.token.operator { + color: #d73a49; +} + +.token.property, +.token.tag, +.token.boolean, +.token.number, +.token.constant, +.token.symbol, +.token.deleted { + color: #22863a; +} + +.token.selector, +.token.attr-name, +.token.string, +.token.char, +.token.builtin, +.token.inserted { + color: #032f62; +} + +.token.function, +.token.class-name { + color: #6f42c1; +} + +/* language-specific */ + +/* JSX */ +.language-jsx .token.punctuation, +.language-jsx .token.tag .token.punctuation, +.language-jsx .token.tag .token.script, +.language-jsx .token.plain-text { + color: #24292e; +} + +.language-jsx .token.tag .token.attr-name { + color: #6f42c1; +} + +.language-jsx .token.tag .token.class-name { + color: #005cc5; +} + +.language-jsx .token.tag .token.script-punctuation, +.language-jsx .token.attr-value .token.punctuation:first-child { + color: #d73a49; +} + +.language-jsx .token.attr-value { + color: #032f62; +} + +.language-jsx span[class="comment"] { + color: pink; +} + +/* HTML */ +.language-html .token.tag .token.punctuation { + color: #24292e; +} + +.language-html 
.token.tag .token.attr-name { + color: #6f42c1; +} + +.language-html .token.tag .token.attr-value, +.language-html + .token.tag + .token.attr-value + .token.punctuation:not(:first-child) { + color: #032f62; +} + +/* CSS */ +.language-css .token.selector { + color: #6f42c1; +} + +.language-css .token.property { + color: #005cc5; +} diff --git a/blog/themes/theme/assets/scss/partials/_nav.scss b/blog/themes/theme/assets/scss/partials/_nav.scss new file mode 100644 index 0000000..e323283 --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_nav.scss @@ -0,0 +1,82 @@ +@import "burger"; + +.nav { + font-size: 16px; + position: fixed; + display: flex; + justify-content: center; + align-items: center; + background: $white; + visibility: hidden; + z-index: 1; + + @media screen and (min-width: $medium) { + display: block; + visibility: visible; + padding-top: 3em; + width: $navWidth; + } +} + +.nav--active .nav { + visibility: visible; + height: 100%; + width: 100%; + + @media screen and (min-width: $medium) { + width: $navWidth; + } +} + +.nav__list { + text-align: right; + list-style: none; + margin: 0; + padding: 0; + width: 50%; + + @media screen and (min-width: $medium) { + width: auto; + } + + @media screen and (max-width: $medium - 1) { + transform: translateY(-25px); + opacity: 0; + .nav--active & { + transform: translateY(0); + opacity: 1; + transition: all 500ms ease; + } + } +} + +.nav__list li { + margin-bottom: 3em; + line-height: 1.5em; + + &:last-of-type { + margin-bottom: 0; + } + + @media screen and (min-width: $medium) { + margin-bottom: 1.75em; + } +} + +.nav__list a { + color: $grey; + text-decoration: none; + font-size: 2em; + + &.active { + color: $black; + } + + &:hover { + color: $black; + } + + @media screen and (min-width: $medium) { + font-size: 1em; + } +} diff --git a/blog/themes/theme/assets/scss/partials/_normalize.scss b/blog/themes/theme/assets/scss/partials/_normalize.scss new file mode 100644 index 0000000..ff5581f --- /dev/null +++ 
b/blog/themes/theme/assets/scss/partials/_normalize.scss @@ -0,0 +1,14 @@ +button, +button[type="button"], +button[type="reset"], +button[type="submit"] { + -webkit-appearance: button; +} + +input, +input[type="text"], +input[type="email"] { + -webkit-appearance: none; + -moz-appearance: none; + appearance: none; +} diff --git a/blog/themes/theme/assets/scss/partials/_pagination.scss b/blog/themes/theme/assets/scss/partials/_pagination.scss new file mode 100644 index 0000000..d8c10d5 --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_pagination.scss @@ -0,0 +1,19 @@ +.paginator-container { + position: absolute; + width: 100%; + text-align: center; +} + +.paginator { + display: inline-block; + height: 24px; + width: 24px; + margin: 0 1.5rem; + background-image: url(/svg/chevron-left.svg); + background-size: contain; + background-repeat: no-repeat; +} + +.paginator--right { + transform: rotate(180deg); +} diff --git a/blog/themes/theme/assets/scss/partials/_post-list.scss b/blog/themes/theme/assets/scss/partials/_post-list.scss new file mode 100644 index 0000000..544bf70 --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_post-list.scss @@ -0,0 +1,34 @@ +.post-list__container { + margin: 0 auto; + max-width: 1200px; + width: 100%; + + @media screen and (min-width: $medium) { + padding-left: 50px; + } +} + +.post-list { + list-style: none; + margin: 0; + padding: 0; +} + +.post { + margin-bottom: 1.5rem; +} + +.post__title { + margin-top: 0; + font-weight: 500; + + a { + color: $black; + text-decoration: none; + } +} + +.post__date { + color: $grey; + font-size: 0.8rem; +} diff --git a/blog/themes/theme/assets/scss/partials/_reset.scss b/blog/themes/theme/assets/scss/partials/_reset.scss new file mode 100644 index 0000000..78ebe16 --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_reset.scss @@ -0,0 +1,18 @@ +html, +body { + background-color: $white; + color: $black; + height: 100%; +} + +html { + box-sizing: border-box; +} + +*, +*:before, 
+*:after { + padding: 0; + margin: 0; + box-sizing: inherit; +} diff --git a/blog/themes/theme/assets/scss/partials/_social-icons.scss b/blog/themes/theme/assets/scss/partials/_social-icons.scss new file mode 100644 index 0000000..45e69cd --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_social-icons.scss @@ -0,0 +1,19 @@ +.social-icons { + display: flex; + justify-content: center; +} + +.social-icons__link { + padding: 0.8rem; + + &:not(:last-child) { + margin-right: 1em; + } + + .social-icons__icon { + width: 1.4rem; + height: 1.4rem; + background-size: contain; + background-repeat: no-repeat; + } +} diff --git a/blog/themes/theme/assets/scss/partials/_typography.scss b/blog/themes/theme/assets/scss/partials/_typography.scss new file mode 100644 index 0000000..c695b00 --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_typography.scss @@ -0,0 +1,149 @@ +$baseFontSize: 16; +$fontSizeMobile: 14; +$baseLineHeight: 1.5; +$scale: 1.414; +$leading: $baseLineHeight * 1rem; + +html { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, + Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; + -webkit-font-smoothing: antialiased; + font-size: 95%; + + @media screen and (min-width: $medium) { + font-size: 100%; + } + + @media screen and (min-width: $large) { + font-size: 115%; + } +} + +h1, +h2, +h3, +h4, +h5, +h6 { + margin-top: $leading; + margin-bottom: 0; + line-height: $leading; +} + +h1 { + font-size: 1.5 * $scale * 1rem; + line-height: 1.5 * $leading; + margin-top: 1.5 * $leading; +} + +h2 { + font-size: $scale * 1rem; +} + +h3 { + font-size: ($scale / 1.2) * 1rem; +} + +h4 { + font-size: ($scale / 1.44) * 1rem; +} + +h5 { + font-size: ($scale / 1.728) * 1rem; +} + +p { + margin-top: $leading; + margin-bottom: 0; + line-height: $leading; +} + +ul, +ol { + padding-left: $leading; + margin-top: $leading; + margin-bottom: $leading; + + li { + line-height: $leading; + } + + ul, + ol { + margin-top: 0; + margin-bottom: 0; + } +} 
+ +blockquote { + &::before { + position: absolute; + content: "\201C"; + + font-size: 6em; + font-family: "Roboto", serif; + margin-top: 0.1em; + margin-left: -0.2em; + + z-index: -1; + color: darken($white, 7%); + } + + margin-top: $leading; + margin-bottom: $leading; + line-height: $leading; + color: $black; + + cite { + &::before { + content: "— "; + } + + font-style: italic; + font-size: 0.95em; + color: $darkGrey; + } +} + +pre { + line-height: 1.45; + margin-top: $leading; + padding: 16px; + word-wrap: normal; + overflow: auto; + background-color: #f6f8fa; + border-radius: 3px; +} + +code { + font-size: 85%; + font-family: "SFMono-Regular", Consolas, Menlo, monospace; + padding: 0.2em 0.4em; + margin: 0; + background-color: rgba(27, 31, 35, 0.05); + border-radius: 3px; +} + +pre > code { + word-break: normal; + white-space: pre; +} + +pre code { + display: inline; + padding: 0; + margin: 0; + overflow: visible; + line-height: inherit; + word-wrap: normal; + background-color: transparent; + border: 0; +} + +.lead { + font-size: $scale * 1rem; +} + +abbr[title] { + text-decoration: underline double; +} diff --git a/blog/themes/theme/assets/scss/partials/_vars.scss b/blog/themes/theme/assets/scss/partials/_vars.scss new file mode 100644 index 0000000..c3c06ac --- /dev/null +++ b/blog/themes/theme/assets/scss/partials/_vars.scss @@ -0,0 +1,21 @@ +$navWidth: 100px; +$meatWidth: 28px; +$meatHeight: 2px; +$burgerContainerHeight: 4rem; + +// colors +$black: #111; +$lightGrey: #f7f7f7; +$greyTableBorder: #eeeeee; +$grey: #9b9b9b; +$darkGrey: #717171; +$white: #fff; +$primary: #9013fe; + +// screenSizes +$medium: 800px; +$large: 1400px; + +// import site overrides after variables after +// variables have been declared +@import "../overrides"; diff --git a/blog/themes/theme/exampleSite/config.toml b/blog/themes/theme/exampleSite/config.toml new file mode 100644 index 0000000..be77136 --- /dev/null +++ b/blog/themes/theme/exampleSite/config.toml @@ -0,0 +1,74 @@ +# 
REMOVE THIS +themesDir = "../../" + +# DO NOT REMOVE THIS +theme = "hugo-theme-codex" + +# Override these settings with your own +title = "codex" +languageCode = "en-us" +baseURL = "https://example.org/" +copyright = "© {year}" + +# Add your Disqus shortname here. +# disqusShortname = "" + +# Add your Google Analytics identifier: UA-XXXXXXXX-X +# googleAnalytics = "" + +# Optional params +[params] + # Follow the Hugo date/time format reference here: + # https://gohugo.io/functions/format/#gos-layout-string + dateFormat = "Jan 2 2006" + + # Links to your social accounts, comment/uncomment as needed. Icons will be displayed for those specified. + twitter = "https://twitter.com/" + github = "https://github.com/" + # email = "mailto:" + # mastodon = "https://mastodon.social/@nickname" + # facebook = "https://facebook.com/" + # gitlab = "https://gitlab.com/" + # instagram = "https://instagram.com/" + # linkedin = "" + # youtube = "https://www.youtube.com/channel/" + + # Titles for your icons (shown as tooltips), and also their display order. + # Currently, these icons are supported: + # "Twitter", "GitHub", "Email", "Mastodon", "Facebook", "GitLab", "Instagram", "LinkedIn", "YouTube" + iconOrder = ["Twitter", "GitHub"] + + # Metadata for Twitter cards, defaults to params.twitter + # twitterSite = "@" + # twitterAuthor = "@" + + # Set to true to display page title in table of contents in blog posts. + showPageTitleInTOC = false + +# This disables Hugo's default syntax highlighting in favor +# of prismjs. If you wish to use Hugo's default syntax highlighting +# over prismjs, remove this. You will also need to remove the prismjs +# vendor script in layouts/blog/single.html. 
+[markup] + [markup.highlight] + codeFences = false + + # Set to false to disallow raw HTML in markdown files + [markup.goldmark.renderer] + unsafe = true + +# Controls the navigation +[[menu.main]] + identifier = "about" + name = "about" + title = "About" + url = "/" + +[[menu.main]] + identifier = "blog" + name = "blog" + title = "Blog" + url = "/blog" + + + diff --git a/blog/themes/theme/exampleSite/content/_index.md b/blog/themes/theme/exampleSite/content/_index.md new file mode 100644 index 0000000..fa5220b --- /dev/null +++ b/blog/themes/theme/exampleSite/content/_index.md @@ -0,0 +1,7 @@ +--- +heading: "Hi, I'm Codex" +subheading: "A minimal blog theme for hugo." +handle: "hugo-theme-codex" +--- + +hello diff --git a/blog/themes/theme/exampleSite/content/blog/example-post.md b/blog/themes/theme/exampleSite/content/blog/example-post.md new file mode 100644 index 0000000..5f8b62b --- /dev/null +++ b/blog/themes/theme/exampleSite/content/blog/example-post.md @@ -0,0 +1,23 @@ +--- +title: "Welcome To Codex" +date: 2020-06-04T09:19:29-04:00 +slug: "example-post" +description: "This is an example post for hugo-theme-codex." +keywords: ["gohugo", "hugo", "go", "blog"] +draft: false +tags: ["hugo"] +math: false +toc: true +--- + +## The standard Lorem Ipsum passage + +"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." + +## written by Cicero in 45 BC + +"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. 
Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?" + +## 1914 translation by H. Rackham + +"But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system, and expound the actual teachings of the great explorer of the truth, the master-builder of human happiness. No one rejects, dislikes, or avoids pleasure itself, because it is pleasure, but because those who do not know how to pursue pleasure rationally encounter consequences that are extremely painful. Nor again is there anyone who loves or pursues or desires to obtain pain of itself, because it is pain, but because occasionally circumstances occur in which toil and pain can procure him some great pleasure. To take a trivial example, which of us ever undertakes laborious physical exercise, except to obtain some advantage from it? But who has any right to find fault with a man who chooses to enjoy a pleasure that has no annoying consequences, or one who avoids a pain that produces no resultant pleasure?" 
diff --git a/blog/themes/theme/exampleSite/content/blog/markdown-syntax.md b/blog/themes/theme/exampleSite/content/blog/markdown-syntax.md new file mode 100644 index 0000000..5dea346 --- /dev/null +++ b/blog/themes/theme/exampleSite/content/blog/markdown-syntax.md @@ -0,0 +1,156 @@ +--- +title: "Markdown Syntax Guide" +date: 2020-06-05 +slug: "markdown-syntax-guide" +description: "Sample article showcasing basic Markdown syntax and formatting for HTML elements" +keywords: ["gohugo", "hugo", "go", "blog"] +draft: false +tags: ["markdown", "css", "html", "themes"] +math: false +toc: false +--- + +This article offers a sample of basic Markdown syntax that can be used in Hugo content files, also it shows whether basic HTML elements are decorated with CSS in a Hugo theme. + + +## Headings + +The following HTML `

`—`

` elements represent six levels of section headings. `

` is the highest section level while `

` is the lowest. + +# H1 +## H2 +### H3 +#### H4 +##### H5 +###### H6 + +## Paragraph + +Xerum, quo qui aut unt expliquam qui dolut labo. Aque venitatiusda cum, voluptionse latur sitiae dolessi aut parist aut dollo enim qui voluptate ma dolestendit peritin re plis aut quas inctum laceat est volestemque commosa as cus endigna tectur, offic to cor sequas etum rerum idem sintibus eiur? Quianimin porecus evelectur, cum que nis nust voloribus ratem aut omnimi, sitatur? Quiatem. Nam, omnis sum am facea corem alique molestrunt et eos evelece arcillit ut aut eos eos nus, sin conecerem erum fuga. Ri oditatquam, ad quibus unda veliamenimin cusam et facea ipsamus es exerum sitate dolores editium rerore eost, temped molorro ratiae volorro te reribus dolorer sperchicium faceata tiustia prat. + +Itatur? Quiatae cullecum rem ent aut odis in re eossequodi nonsequ idebis ne sapicia is sinveli squiatum, core et que aut hariosam ex eat. + +## Blockquotes + +The blockquote element represents content that is quoted from another source, optionally with a citation which must be within a `footer` or `cite` element, and optionally with in-line changes such as annotations and abbreviations. + +#### Blockquote without attribution + +> Tiam, ad mint andaepu dandae nostion secatur sequo quae. +> **Note** that you can use *Markdown syntax* within a blockquote. + +#### Blockquote with attribution + +> Simplicity is the ultimate sophistication. +> Leonardo da Vinci[^1] + +[^1]: The above quote is often attributed to Leonardo da Vinci but there is no concrete evidence to support this. + + +## Tables + +Tables aren't part of the core Markdown spec, but Hugo supports supports them out-of-the-box. + + Name | Age +--------|------ + Bob | 27 + Alice | 23 + +#### Inline Markdown within tables + +
+ +| Inline    | Markdown    | In    | Table | +| ---------- | --------- | ----------------- | ---------- | +| *italics* | **bold** | ~~strikethrough~~    | `code` | + +
+ +## Code Blocks + +#### Code block with backticks + +```html + + + + + Example HTML5 Document + + +

Test

+ + +``` +#### Code block indented with four spaces + + + + + + Example HTML5 Document + + +

Test

+ + + +#### Code block with Hugo's internal highlight shortcode +```html + + + + + Example HTML5 Document + + +

Test

+ + +``` + +#### Wide code block +```html + + + + + Example HTML5 Document + + +

Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.

+ + +``` + +## List Types + +#### Ordered List + +1. First item +2. Second item +3. Third item + +#### Unordered List + +* List item +* Another item +* And another item + +#### Nested list + +* Item +1. First Sub-item +2. Second Sub-item + +## Other Elements — abbr, sub, sup, kbd, mark + +GIF is a bitmap image format. + +H2O + +Xn + Yn = Zn + +Press CTRL+ALT+Delete to end the session. + +Most salamanders are nocturnal, and hunt for insects, worms, and other small creatures. diff --git a/blog/themes/theme/exampleSite/content/blog/math-typesetting.md b/blog/themes/theme/exampleSite/content/blog/math-typesetting.md new file mode 100644 index 0000000..d7ce771 --- /dev/null +++ b/blog/themes/theme/exampleSite/content/blog/math-typesetting.md @@ -0,0 +1,48 @@ +--- +title: "Math Typesetting" +date: 2020-06-05 +slug: "math-typesetting" +description: "A brief guide to setup KaTeX" +keywords: ["gohugo", "hugo", "go", "blog"] +draft: false +tags: ["math"] +math: true +toc: false +--- + +Mathematical notation in a Hugo project can be enabled by using third party JavaScript libraries. + + +In this example we will be using [KaTeX](https://katex.org/) + +- Create a partial under `/layouts/partials/math.html` +- Within this partial reference the [Auto-render Extension](https://katex.org/docs/autorender.html) or host these scripts locally. +- Include the partial in your templates like so: + +``` +{{ if or .Params.math .Site.Params.math }} +{{ partial "math.html" . }} +{{ end }} +``` +- To enable KaTex globally set the parameter `math` to `true` in a project's configuration +- To enable KaTex on a per page basis include the parameter `math: true` in content files. 
+ +**Note:** Use the online reference of [Supported TeX Functions](https://katex.org/docs/supported.html) +{{< math.inline >}} +{{ if or .Page.Params.math .Site.Params.math }} + + + + +{{ end }} +{{}} + +### Examples + +Inline math: $$ \varphi = \dfrac{1+\sqrt5}{2}= 1.6180339887… $$ + +Block math: + +$$ +\sigma(t) = \cfrac{1}{1 + e^{-t}} +$$ diff --git a/blog/themes/theme/exampleSite/content/blog/rich-content.md b/blog/themes/theme/exampleSite/content/blog/rich-content.md new file mode 100644 index 0000000..c69a34b --- /dev/null +++ b/blog/themes/theme/exampleSite/content/blog/rich-content.md @@ -0,0 +1,32 @@ +--- +title: "Rich Content" +date: 2020-06-05 +slug: "rich-text" +description: "A brief description of Hugo Shortcodes" +keywords: ["gohugo", "hugo", "go", "blog"] +draft: false +tags: ["shortcodes"] +math: false +toc: false +--- + +Hugo ships with several [Built-in Shortcodes](https://gohugo.io/content-management/shortcodes/#use-hugo-s-built-in-shortcodes) for rich content, along with a [Privacy Config](https://gohugo.io/about/hugo-and-gdpr/) and a set of Simple Shortcodes that enable static and no-JS versions of various social media embeds. 
+ +## Instagram Simple Shortcode + +{{< instagram_simple BGvuInzyFAe hidecaption >}} + + +## YouTube Privacy Enhanced Shortcode + +{{< youtube ZJthWmvUzzc >}} + + +## Twitter Simple Shortcode + +{{< tweet 1085870671291310081 >}} + + +## Vimeo Simple Shortcode + +{{< vimeo_simple 48912912 >}} diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/custom.scss_3397a22b18c3bef8be8c21c341bc8237.content b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/custom.scss_3397a22b18c3bef8be8c21c341bc8237.content new file mode 100644 index 0000000..e69de29 diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/custom.scss_3397a22b18c3bef8be8c21c341bc8237.json b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/custom.scss_3397a22b18c3bef8be8c21c341bc8237.json new file mode 100644 index 0000000..de94e08 --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/custom.scss_3397a22b18c3bef8be8c21c341bc8237.json @@ -0,0 +1 @@ +{"Target":"css/custom.min.e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.css","MediaType":"text/css","Data":{"Integrity":"sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU="}} \ No newline at end of file diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/about.scss_aba0dac60eb4049f68f83f39827d1b50.content b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/about.scss_aba0dac60eb4049f68f83f39827d1b50.content new file mode 100644 index 0000000..47f3489 --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/about.scss_aba0dac60eb4049f68f83f39827d1b50.content @@ -0,0 +1 @@ +@charset 
"UTF-8";button,button[type=button],button[type=reset],button[type=submit]{-webkit-appearance:button}input,input[type=text],input[type=email]{-webkit-appearance:none;-moz-appearance:none;appearance:none}html,body{background-color:#fff;color:#111;height:100%}html{box-sizing:border-box}*,*:before,*:after{padding:0;margin:0;box-sizing:inherit}html{font-family:-apple-system,BlinkMacSystemFont,segoe ui,Roboto,Oxygen-Sans,Ubuntu,Cantarell,helvetica neue,sans-serif;-webkit-font-smoothing:antialiased;font-size:95%}@media screen and (min-width:800px){html{font-size:100%}}@media screen and (min-width:1400px){html{font-size:115%}}h1,h2,h3,h4,h5,h6{margin-top:1.5rem;margin-bottom:0;line-height:1.5rem}h1{font-size:2.121rem;line-height:2.25rem;margin-top:2.25rem}h2{font-size:1.414rem}h3{font-size:1.17833333rem}h4{font-size:.98194444rem}h5{font-size:.81828704rem}p{margin-top:1.5rem;margin-bottom:0;line-height:1.5rem}ul,ol{padding-left:1.5rem;margin-top:1.5rem;margin-bottom:1.5rem}ul li,ol li{line-height:1.5rem}ul ul,ul ol,ol ul,ol ol{margin-top:0;margin-bottom:0}blockquote{margin-top:1.5rem;margin-bottom:1.5rem;line-height:1.5rem;color:#111}blockquote::before{position:absolute;content:"\201C";font-size:6em;font-family:roboto,serif;margin-top:.1em;margin-left:-.2em;z-index:-1;color:#ededed}blockquote cite{font-style:italic;font-size:.95em;color:#717171}blockquote cite::before{content:"— "}pre{line-height:1.45;margin-top:1.5rem;padding:16px;word-wrap:normal;overflow:auto;background-color:#f6f8fa;border-radius:3px}code{font-size:85%;font-family:sfmono-regular,Consolas,Menlo,monospace;padding:.2em .4em;margin:0;background-color:rgba(27,31,35,.05);border-radius:3px}pre>code{word-break:normal;white-space:pre}pre code{display:inline;padding:0;margin:0;overflow:visible;line-height:inherit;word-wrap:normal;background-color:transparent;border:0}.lead{font-size:1.414rem}abbr[title]{text-decoration:underline double}.burger__container{height:4rem;display:flex;align-items:center;padding:0 
1.5rem;position:fixed;width:100%;background:#fff;z-index:2}@media screen and (min-width:800px){.burger__container{display:none}}.burger{position:relative;width:28px;height:28px;cursor:pointer}.burger__meat{position:absolute;width:28px;height:2px;background:#111;top:calc(50% - 2px/2);left:calc(50% - 28px/2);transition:all 150ms ease-in}.burger__meat--1{transform:translateY(-10px)}.burger__meat--2{width:calc(28px - 6px)}.burger__meat--3{transform:translateY(10px)}.nav--active .burger__meat--1{transform:rotate(45deg)}.nav--active .burger__meat--2{opacity:0}.nav--active .burger__meat--3{transform:rotate(-45deg)}.nav{font-size:16px;position:fixed;display:flex;justify-content:center;align-items:center;background:#fff;visibility:hidden;z-index:1}@media screen and (min-width:800px){.nav{display:block;visibility:visible;padding-top:3em;width:100px}}.nav--active .nav{visibility:visible;height:100%;width:100%}@media screen and (min-width:800px){.nav--active .nav{width:100px}}.nav__list{text-align:right;list-style:none;margin:0;padding:0;width:50%}@media screen and (min-width:800px){.nav__list{width:auto}}@media screen and (max-width:799px){.nav__list{transform:translateY(-25px);opacity:0}.nav--active .nav__list{transform:translateY(0);opacity:1;transition:all 500ms ease}}.nav__list li{margin-bottom:3em;line-height:1.5em}.nav__list li:last-of-type{margin-bottom:0}@media screen and (min-width:800px){.nav__list li{margin-bottom:1.75em}}.nav__list a{color:#9b9b9b;text-decoration:none;font-size:2em}.nav__list a.active{color:#111}.nav__list a:hover{color:#111}@media screen and (min-width:800px){.nav__list a{font-size:1em}}.social-icons{display:flex;justify-content:center}.social-icons__link{padding:.8rem}.social-icons__link:not(:last-child){margin-right:1em}.social-icons__link .social-icons__icon{width:1.4rem;height:1.4rem;background-size:contain;background-repeat:no-repeat}body.nav--active{overflow:hidden}main{padding:3rem 1.5rem 1rem}@media screen and 
(min-width:800px){main{padding-left:calc(1.5rem + 100px)}}@media screen and (max-width:799px){main{padding-top:calc(3rem + 4rem)}}.splash-container{height:100%;display:flex;justify-content:center;align-items:center;font-size:14px}@media screen and (min-width:800px){.splash-container{font-size:18px}}.splash h1{font-size:3em;line-height:1;letter-spacing:-.03em;margin:0}.splash h2{font-size:2.25em;font-weight:500;line-height:1.25;max-width:22em;letter-spacing:-.03em}.fancy{color:#9013fe}.handle{display:inline-block;margin-top:.275em;color:#9b9b9b;letter-spacing:.5px}.writing{text-decoration:none;color:#9013fe}main{padding-top:0;padding-bottom:0;height:100%}.social-icons{justify-content:flex-start;padding-top:1rem;margin-left:-.8rem} \ No newline at end of file diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/about.scss_aba0dac60eb4049f68f83f39827d1b50.json b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/about.scss_aba0dac60eb4049f68f83f39827d1b50.json new file mode 100644 index 0000000..5a679b8 --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/about.scss_aba0dac60eb4049f68f83f39827d1b50.json @@ -0,0 +1 @@ +{"Target":"css/about.min.faa0cdf2d211fbe1d8399c340d898781f94b8e9a9d0e53657a5b8bf7c13e990b.css","MediaType":"text/css","Data":{"Integrity":"sha256-+qDN8tIR++HYOZw0DYmHgflLjpqdDlNleluL98E+mQs="}} \ No newline at end of file diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/post.scss_9b5a08989dabcb86a5d520fc24aa3741.content b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/post.scss_9b5a08989dabcb86a5d520fc24aa3741.content new file mode 100644 index 0000000..57ec034 --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/post.scss_9b5a08989dabcb86a5d520fc24aa3741.content @@ -0,0 +1 @@ +@charset 
"UTF-8";button,button[type=button],button[type=reset],button[type=submit]{-webkit-appearance:button}input,input[type=text],input[type=email]{-webkit-appearance:none;-moz-appearance:none;appearance:none}html,body{background-color:#fff;color:#111;height:100%}html{box-sizing:border-box}*,*:before,*:after{padding:0;margin:0;box-sizing:inherit}html{font-family:-apple-system,BlinkMacSystemFont,segoe ui,Roboto,Oxygen-Sans,Ubuntu,Cantarell,helvetica neue,sans-serif;-webkit-font-smoothing:antialiased;font-size:95%}@media screen and (min-width:800px){html{font-size:100%}}@media screen and (min-width:1400px){html{font-size:115%}}h1,h2,h3,h4,h5,h6{margin-top:1.5rem;margin-bottom:0;line-height:1.5rem}h1{font-size:2.121rem;line-height:2.25rem;margin-top:2.25rem}h2{font-size:1.414rem}h3{font-size:1.17833333rem}h4{font-size:.98194444rem}h5{font-size:.81828704rem}p{margin-top:1.5rem;margin-bottom:0;line-height:1.5rem}ul,ol{padding-left:1.5rem;margin-top:1.5rem;margin-bottom:1.5rem}ul li,ol li{line-height:1.5rem}ul ul,ul ol,ol ul,ol ol{margin-top:0;margin-bottom:0}blockquote{margin-top:1.5rem;margin-bottom:1.5rem;line-height:1.5rem;color:#111}blockquote::before{position:absolute;content:"\201C";font-size:6em;font-family:roboto,serif;margin-top:.1em;margin-left:-.2em;z-index:-1;color:#ededed}blockquote cite{font-style:italic;font-size:.95em;color:#717171}blockquote cite::before{content:"— "}pre{line-height:1.45;margin-top:1.5rem;padding:16px;word-wrap:normal;overflow:auto;background-color:#f6f8fa;border-radius:3px}code{font-size:85%;font-family:sfmono-regular,Consolas,Menlo,monospace;padding:.2em .4em;margin:0;background-color:rgba(27,31,35,.05);border-radius:3px}pre>code{word-break:normal;white-space:pre}pre code{display:inline;padding:0;margin:0;overflow:visible;line-height:inherit;word-wrap:normal;background-color:transparent;border:0}.lead{font-size:1.414rem}abbr[title]{text-decoration:underline double}.burger__container{height:4rem;display:flex;align-items:center;padding:0 
1.5rem;position:fixed;width:100%;background:#fff;z-index:2}@media screen and (min-width:800px){.burger__container{display:none}}.burger{position:relative;width:28px;height:28px;cursor:pointer}.burger__meat{position:absolute;width:28px;height:2px;background:#111;top:calc(50% - 2px/2);left:calc(50% - 28px/2);transition:all 150ms ease-in}.burger__meat--1{transform:translateY(-10px)}.burger__meat--2{width:calc(28px - 6px)}.burger__meat--3{transform:translateY(10px)}.nav--active .burger__meat--1{transform:rotate(45deg)}.nav--active .burger__meat--2{opacity:0}.nav--active .burger__meat--3{transform:rotate(-45deg)}.nav{font-size:16px;position:fixed;display:flex;justify-content:center;align-items:center;background:#fff;visibility:hidden;z-index:1}@media screen and (min-width:800px){.nav{display:block;visibility:visible;padding-top:3em;width:100px}}.nav--active .nav{visibility:visible;height:100%;width:100%}@media screen and (min-width:800px){.nav--active .nav{width:100px}}.nav__list{text-align:right;list-style:none;margin:0;padding:0;width:50%}@media screen and (min-width:800px){.nav__list{width:auto}}@media screen and (max-width:799px){.nav__list{transform:translateY(-25px);opacity:0}.nav--active .nav__list{transform:translateY(0);opacity:1;transition:all 500ms ease}}.nav__list li{margin-bottom:3em;line-height:1.5em}.nav__list li:last-of-type{margin-bottom:0}@media screen and (min-width:800px){.nav__list li{margin-bottom:1.75em}}.nav__list a{color:#9b9b9b;text-decoration:none;font-size:2em}.nav__list a.active{color:#111}.nav__list a:hover{color:#111}@media screen and (min-width:800px){.nav__list a{font-size:1em}}.social-icons{display:flex;justify-content:center}.social-icons__link{padding:.8rem}.social-icons__link:not(:last-child){margin-right:1em}.social-icons__link .social-icons__icon{width:1.4rem;height:1.4rem;background-size:contain;background-repeat:no-repeat}body.nav--active{overflow:hidden}main{padding:3rem 1.5rem 1rem}@media screen and 
(min-width:800px){main{padding-left:calc(1.5rem + 100px)}}@media screen and (max-width:799px){main{padding-top:calc(3rem + 4rem)}}code[class*=language-],pre[class*=language-]{color:#24292e;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-moz-hyphens:none;-ms-hyphens:none;hyphens:none}.token.comment,.token.prolog,.token.doctype,.token.cdata,.token.plain-text{color:#6a737d}.token.atrule,.token.attr-value,.token.keyword,.token.operator{color:#d73a49}.token.property,.token.tag,.token.boolean,.token.number,.token.constant,.token.symbol,.token.deleted{color:#22863a}.token.selector,.token.attr-name,.token.string,.token.char,.token.builtin,.token.inserted{color:#032f62}.token.function,.token.class-name{color:#6f42c1}.language-jsx .token.punctuation,.language-jsx .token.tag .token.punctuation,.language-jsx .token.tag .token.script,.language-jsx .token.plain-text{color:#24292e}.language-jsx .token.tag .token.attr-name{color:#6f42c1}.language-jsx .token.tag .token.class-name{color:#005cc5}.language-jsx .token.tag .token.script-punctuation,.language-jsx .token.attr-value .token.punctuation:first-child{color:#d73a49}.language-jsx .token.attr-value{color:#032f62}.language-jsx span[class=comment]{color:pink}.language-html .token.tag .token.punctuation{color:#24292e}.language-html .token.tag .token.attr-name{color:#6f42c1}.language-html .token.tag .token.attr-value,.language-html .token.tag .token.attr-value .token.punctuation:not(:first-child){color:#032f62}.language-css .token.selector{color:#6f42c1}.language-css .token.property{color:#005cc5}.flex-wrapper{display:flex}.post__container{flex-grow:1;min-width:0}.post{width:100%;max-width:34rem;margin:0 auto}.post h2,.post h3{position:relative;padding-top:10px}.post h2 .anchor,.post h3 .anchor{top:.5rem;text-decoration:none;position:absolute;left:-1rem;color:#9b9b9b;font-size:1.2rem;font-weight:400}.post h2 .anchor:hover,.post h3 .anchor:hover{color:#717171}.post blockquote{width:95%;margin:0 auto;font-size:1rem}.post 
blockquote a{color:#717171;text-decoration:underline}.post img{width:100%;max-width:500px;margin:0 auto;display:block}#post__title{margin-top:0;margin-bottom:.5rem}.post__date{color:#9b9b9b;font-size:.8rem}.post__footer{padding-top:3rem}.toc-container{position:sticky;align-self:start;top:3rem;max-width:350px}@media screen and (max-width:1024px){.toc-container{display:none}}.toc-post-title{font-size:.9rem;margin-bottom:.8rem}#TableOfContents ul{list-style:none;margin:0}#TableOfContents a{text-decoration:none;color:#9b9b9b;font-size:.9rem}#TableOfContents a.active{color:#333}.tags__list{padding-right:1.5rem;margin:1.5rem 0 0;list-style:none;display:flex;justify-content:flex-end}.tag__item{margin-right:1rem;display:inline-block}.tag__item:last-child{margin-right:0}.tag__link{display:inline-block;text-decoration:none;padding:.2em .4em;border-radius:3px;background:#f2e3ff;color:#9013fe;font-size:.8rem}.tag__link:hover{background:#ebd4ff}.gif{margin-top:1.5rem}.gif img{max-width:375px}.pagination{display:flex;flex-direction:column;margin-top:1.5rem}@media screen and (min-width:600px){.pagination{flex-direction:row;justify-content:space-between}}.pagination__item{text-decoration:none;display:flex;flex-direction:column}.pagination__item:nth-child(2){margin-top:1.5rem}@media screen and (min-width:600px){.pagination__item{width:275px;padding:15px;border-radius:4px}.pagination__item:first-of-type{padding-right:15px}.pagination__item:last-of-type{margin-top:0}.pagination__item:hover{background-color:#f6f9fc}}.pagination__label{color:#9b9b9b;font-size:.8rem}.pagination__title{color:#111;font-weight:700;margin-top:.25rem}footer{text-align:center;padding:0 1.5rem;background:#fff}footer p{margin-top:1rem;color:#9b9b9b;font-size:.65rem}.post__content ul{list-style:none}.post__content ul li{margin-bottom:.5rem}.post__content ul li::before{content:"-";color:#717171;position:absolute;margin-left:-15px}.twitter-tweet.twitter-tweet-rendered{margin:1.5rem 
auto!important;width:375px!important}table{max-width:100%;border-spacing:0}table thead{background:#f7f7f7}table th,table td{padding:.5em 1em;border:1px double #eee} \ No newline at end of file diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/post.scss_9b5a08989dabcb86a5d520fc24aa3741.json b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/post.scss_9b5a08989dabcb86a5d520fc24aa3741.json new file mode 100644 index 0000000..b14a4dc --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/post.scss_9b5a08989dabcb86a5d520fc24aa3741.json @@ -0,0 +1 @@ +{"Target":"css/post.min.b60e0932fe1c50c3d7c5b4f83ee9e4592363654d0f2abf05bbd0678d5b8a214c.css","MediaType":"text/css","Data":{"Integrity":"sha256-tg4JMv4cUMPXxbT4PunkWSNjZU0PKr8Fu9BnjVuKIUw="}} \ No newline at end of file diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/posts.scss_45b5651bc7f84edbd12295da47cc9fcd.content b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/posts.scss_45b5651bc7f84edbd12295da47cc9fcd.content new file mode 100644 index 0000000..f81ccba --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/posts.scss_45b5651bc7f84edbd12295da47cc9fcd.content @@ -0,0 +1 @@ +@charset "UTF-8";button,button[type=button],button[type=reset],button[type=submit]{-webkit-appearance:button}input,input[type=text],input[type=email]{-webkit-appearance:none;-moz-appearance:none;appearance:none}html,body{background-color:#fff;color:#111;height:100%}html{box-sizing:border-box}*,*:before,*:after{padding:0;margin:0;box-sizing:inherit}html{font-family:-apple-system,BlinkMacSystemFont,segoe ui,Roboto,Oxygen-Sans,Ubuntu,Cantarell,helvetica neue,sans-serif;-webkit-font-smoothing:antialiased;font-size:95%}@media screen and (min-width:800px){html{font-size:100%}}@media screen and 
(min-width:1400px){html{font-size:115%}}h1,h2,h3,h4,h5,h6{margin-top:1.5rem;margin-bottom:0;line-height:1.5rem}h1{font-size:2.121rem;line-height:2.25rem;margin-top:2.25rem}h2{font-size:1.414rem}h3{font-size:1.17833333rem}h4{font-size:.98194444rem}h5{font-size:.81828704rem}p{margin-top:1.5rem;margin-bottom:0;line-height:1.5rem}ul,ol{padding-left:1.5rem;margin-top:1.5rem;margin-bottom:1.5rem}ul li,ol li{line-height:1.5rem}ul ul,ul ol,ol ul,ol ol{margin-top:0;margin-bottom:0}blockquote{margin-top:1.5rem;margin-bottom:1.5rem;line-height:1.5rem;color:#111}blockquote::before{position:absolute;content:"\201C";font-size:6em;font-family:roboto,serif;margin-top:.1em;margin-left:-.2em;z-index:-1;color:#ededed}blockquote cite{font-style:italic;font-size:.95em;color:#717171}blockquote cite::before{content:"— "}pre{line-height:1.45;margin-top:1.5rem;padding:16px;word-wrap:normal;overflow:auto;background-color:#f6f8fa;border-radius:3px}code{font-size:85%;font-family:sfmono-regular,Consolas,Menlo,monospace;padding:.2em .4em;margin:0;background-color:rgba(27,31,35,.05);border-radius:3px}pre>code{word-break:normal;white-space:pre}pre code{display:inline;padding:0;margin:0;overflow:visible;line-height:inherit;word-wrap:normal;background-color:transparent;border:0}.lead{font-size:1.414rem}abbr[title]{text-decoration:underline double}.burger__container{height:4rem;display:flex;align-items:center;padding:0 1.5rem;position:fixed;width:100%;background:#fff;z-index:2}@media screen and (min-width:800px){.burger__container{display:none}}.burger{position:relative;width:28px;height:28px;cursor:pointer}.burger__meat{position:absolute;width:28px;height:2px;background:#111;top:calc(50% - 2px/2);left:calc(50% - 28px/2);transition:all 150ms ease-in}.burger__meat--1{transform:translateY(-10px)}.burger__meat--2{width:calc(28px - 6px)}.burger__meat--3{transform:translateY(10px)}.nav--active .burger__meat--1{transform:rotate(45deg)}.nav--active .burger__meat--2{opacity:0}.nav--active 
.burger__meat--3{transform:rotate(-45deg)}.nav{font-size:16px;position:fixed;display:flex;justify-content:center;align-items:center;background:#fff;visibility:hidden;z-index:1}@media screen and (min-width:800px){.nav{display:block;visibility:visible;padding-top:3em;width:100px}}.nav--active .nav{visibility:visible;height:100%;width:100%}@media screen and (min-width:800px){.nav--active .nav{width:100px}}.nav__list{text-align:right;list-style:none;margin:0;padding:0;width:50%}@media screen and (min-width:800px){.nav__list{width:auto}}@media screen and (max-width:799px){.nav__list{transform:translateY(-25px);opacity:0}.nav--active .nav__list{transform:translateY(0);opacity:1;transition:all 500ms ease}}.nav__list li{margin-bottom:3em;line-height:1.5em}.nav__list li:last-of-type{margin-bottom:0}@media screen and (min-width:800px){.nav__list li{margin-bottom:1.75em}}.nav__list a{color:#9b9b9b;text-decoration:none;font-size:2em}.nav__list a.active{color:#111}.nav__list a:hover{color:#111}@media screen and (min-width:800px){.nav__list a{font-size:1em}}.social-icons{display:flex;justify-content:center}.social-icons__link{padding:.8rem}.social-icons__link:not(:last-child){margin-right:1em}.social-icons__link .social-icons__icon{width:1.4rem;height:1.4rem;background-size:contain;background-repeat:no-repeat}body.nav--active{overflow:hidden}main{padding:3rem 1.5rem 1rem}@media screen and (min-width:800px){main{padding-left:calc(1.5rem + 100px)}}@media screen and (max-width:799px){main{padding-top:calc(3rem + 4rem)}}.post-list__container{margin:0 auto;max-width:1200px;width:100%}@media screen and (min-width:800px){.post-list__container{padding-left:50px}}.post-list{list-style:none;margin:0;padding:0}.post{margin-bottom:1.5rem}.post__title{margin-top:0;font-weight:500}.post__title a{color:#111;text-decoration:none}.post__date{color:#9b9b9b;font-size:.8rem}.tags__list{list-style:none;margin:0;padding:0 0 0 50px;flex-shrink:0}@media screen and 
(max-width:799px){.tags__list{display:none}}.post__header .tags__list{display:none;padding-left:0}@media screen and (max-width:799px){.post__header .tags__list{display:block}}.post__header .tags__list .tag__item{display:inline-block;margin-right:10px}.post__header .tags__list .tag__item:last-child{margin-right:0}.post__header .tags__list .tag__link{font-size:.8rem}.tag__link{text-decoration:none;color:#9b9b9b;font-size:.9rem}.tag__link::before{content:"#";font-size:.7rem;padding-right:1px}.tag__link:hover{color:#717171}.post-list__container{display:flex;justify-content:space-between} \ No newline at end of file diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/posts.scss_45b5651bc7f84edbd12295da47cc9fcd.json b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/posts.scss_45b5651bc7f84edbd12295da47cc9fcd.json new file mode 100644 index 0000000..f1dc864 --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/posts.scss_45b5651bc7f84edbd12295da47cc9fcd.json @@ -0,0 +1 @@ +{"Target":"css/posts.min.9d00414be708b07c685f180cfce5239fac4e85078a970260044342d7422f18f8.css","MediaType":"text/css","Data":{"Integrity":"sha256-nQBBS+cIsHxoXxgM/OUjn6xOhQeKlwJgBENC10IvGPg="}} \ No newline at end of file diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/tags.scss_f297ce3c5afa481cf9ec4d5576b36b22.content b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/tags.scss_f297ce3c5afa481cf9ec4d5576b36b22.content new file mode 100644 index 0000000..c983913 --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/tags.scss_f297ce3c5afa481cf9ec4d5576b36b22.content @@ -0,0 +1 @@ +@charset 
"UTF-8";button,button[type=button],button[type=reset],button[type=submit]{-webkit-appearance:button}input,input[type=text],input[type=email]{-webkit-appearance:none;-moz-appearance:none;appearance:none}html,body{background-color:#fff;color:#111;height:100%}html{box-sizing:border-box}*,*:before,*:after{padding:0;margin:0;box-sizing:inherit}html{font-family:-apple-system,BlinkMacSystemFont,segoe ui,Roboto,Oxygen-Sans,Ubuntu,Cantarell,helvetica neue,sans-serif;-webkit-font-smoothing:antialiased;font-size:95%}@media screen and (min-width:800px){html{font-size:100%}}@media screen and (min-width:1400px){html{font-size:115%}}h1,h2,h3,h4,h5,h6{margin-top:1.5rem;margin-bottom:0;line-height:1.5rem}h1{font-size:2.121rem;line-height:2.25rem;margin-top:2.25rem}h2{font-size:1.414rem}h3{font-size:1.17833333rem}h4{font-size:.98194444rem}h5{font-size:.81828704rem}p{margin-top:1.5rem;margin-bottom:0;line-height:1.5rem}ul,ol{padding-left:1.5rem;margin-top:1.5rem;margin-bottom:1.5rem}ul li,ol li{line-height:1.5rem}ul ul,ul ol,ol ul,ol ol{margin-top:0;margin-bottom:0}blockquote{margin-top:1.5rem;margin-bottom:1.5rem;line-height:1.5rem;color:#111}blockquote::before{position:absolute;content:"\201C";font-size:6em;font-family:roboto,serif;margin-top:.1em;margin-left:-.2em;z-index:-1;color:#ededed}blockquote cite{font-style:italic;font-size:.95em;color:#717171}blockquote cite::before{content:"— "}pre{line-height:1.45;margin-top:1.5rem;padding:16px;word-wrap:normal;overflow:auto;background-color:#f6f8fa;border-radius:3px}code{font-size:85%;font-family:sfmono-regular,Consolas,Menlo,monospace;padding:.2em .4em;margin:0;background-color:rgba(27,31,35,.05);border-radius:3px}pre>code{word-break:normal;white-space:pre}pre code{display:inline;padding:0;margin:0;overflow:visible;line-height:inherit;word-wrap:normal;background-color:transparent;border:0}.lead{font-size:1.414rem}abbr[title]{text-decoration:underline double}.burger__container{height:4rem;display:flex;align-items:center;padding:0 
1.5rem;position:fixed;width:100%;background:#fff;z-index:2}@media screen and (min-width:800px){.burger__container{display:none}}.burger{position:relative;width:28px;height:28px;cursor:pointer}.burger__meat{position:absolute;width:28px;height:2px;background:#111;top:calc(50% - 2px/2);left:calc(50% - 28px/2);transition:all 150ms ease-in}.burger__meat--1{transform:translateY(-10px)}.burger__meat--2{width:calc(28px - 6px)}.burger__meat--3{transform:translateY(10px)}.nav--active .burger__meat--1{transform:rotate(45deg)}.nav--active .burger__meat--2{opacity:0}.nav--active .burger__meat--3{transform:rotate(-45deg)}.nav{font-size:16px;position:fixed;display:flex;justify-content:center;align-items:center;background:#fff;visibility:hidden;z-index:1}@media screen and (min-width:800px){.nav{display:block;visibility:visible;padding-top:3em;width:100px}}.nav--active .nav{visibility:visible;height:100%;width:100%}@media screen and (min-width:800px){.nav--active .nav{width:100px}}.nav__list{text-align:right;list-style:none;margin:0;padding:0;width:50%}@media screen and (min-width:800px){.nav__list{width:auto}}@media screen and (max-width:799px){.nav__list{transform:translateY(-25px);opacity:0}.nav--active .nav__list{transform:translateY(0);opacity:1;transition:all 500ms ease}}.nav__list li{margin-bottom:3em;line-height:1.5em}.nav__list li:last-of-type{margin-bottom:0}@media screen and (min-width:800px){.nav__list li{margin-bottom:1.75em}}.nav__list a{color:#9b9b9b;text-decoration:none;font-size:2em}.nav__list a.active{color:#111}.nav__list a:hover{color:#111}@media screen and (min-width:800px){.nav__list a{font-size:1em}}.social-icons{display:flex;justify-content:center}.social-icons__link{padding:.8rem}.social-icons__link:not(:last-child){margin-right:1em}.social-icons__link .social-icons__icon{width:1.4rem;height:1.4rem;background-size:contain;background-repeat:no-repeat}body.nav--active{overflow:hidden}main{padding:3rem 1.5rem 1rem}@media screen and 
(min-width:800px){main{padding-left:calc(1.5rem + 100px)}}@media screen and (max-width:799px){main{padding-top:calc(3rem + 4rem)}}.post-list__container{margin:0 auto;max-width:1200px;width:100%}@media screen and (min-width:800px){.post-list__container{padding-left:50px}}.post-list{list-style:none;margin:0;padding:0}.post{margin-bottom:1.5rem}.post__title{margin-top:0;font-weight:500}.post__title a{color:#111;text-decoration:none}.post__date{color:#9b9b9b;font-size:.8rem}.tag__header{align-items:baseline;display:flex;margin:0 auto 3rem}.tag__header a,.tag__header .separator{color:#9b9b9b;font-size:1.5rem}.tag__header a{text-decoration:none}.tag__header .separator{align-self:center;margin:0 5px}.tag__header .tag__term{margin:0;font-weight:600} \ No newline at end of file diff --git a/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/tags.scss_f297ce3c5afa481cf9ec4d5576b36b22.json b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/tags.scss_f297ce3c5afa481cf9ec4d5576b36b22.json new file mode 100644 index 0000000..ed926bb --- /dev/null +++ b/blog/themes/theme/exampleSite/resources/_gen/assets/scss/scss/pages/tags.scss_f297ce3c5afa481cf9ec4d5576b36b22.json @@ -0,0 +1 @@ +{"Target":"css/tags.min.366a1c4761304f40d7ab2549c070344a1c8cfbfefc263bb2187aa968d9f66612.css","MediaType":"text/css","Data":{"Integrity":"sha256-NmocR2EwT0DXqyVJwHA0ShyM+/78JjuyGHqpaNn2ZhI="}} \ No newline at end of file diff --git a/blog/themes/theme/i18n/de.toml b/blog/themes/theme/i18n/de.toml new file mode 100644 index 0000000..9213bd0 --- /dev/null +++ b/blog/themes/theme/i18n/de.toml @@ -0,0 +1,8 @@ +[prev_post] +other = "Vorheriger Beitrag" + +[next_post] +other = "Nächster Beitrag" + +[all_posts] +other = "Alle Beitäge" \ No newline at end of file diff --git a/blog/themes/theme/i18n/en.toml b/blog/themes/theme/i18n/en.toml new file mode 100644 index 0000000..452a193 --- /dev/null +++ b/blog/themes/theme/i18n/en.toml @@ -0,0 +1,8 @@ +[prev_post] +other = 
"Previous Post" + +[next_post] +other = "Next Post" + +[all_posts] +other = "All posts" \ No newline at end of file diff --git a/blog/themes/theme/images/screenshot.png b/blog/themes/theme/images/screenshot.png new file mode 100644 index 0000000..8345f89 Binary files /dev/null and b/blog/themes/theme/images/screenshot.png differ diff --git a/blog/themes/theme/images/tn.png b/blog/themes/theme/images/tn.png new file mode 100644 index 0000000..8272f8c Binary files /dev/null and b/blog/themes/theme/images/tn.png differ diff --git a/blog/themes/theme/layouts/404.html b/blog/themes/theme/layouts/404.html new file mode 100644 index 0000000..ff2173a --- /dev/null +++ b/blog/themes/theme/layouts/404.html @@ -0,0 +1,7 @@ +{{ define "main" }} +
+

+ This is not the page you were looking for +

+
+{{ end }} diff --git a/blog/themes/theme/layouts/_default/_markup/render-link.html b/blog/themes/theme/layouts/_default/_markup/render-link.html new file mode 100644 index 0000000..d445e88 --- /dev/null +++ b/blog/themes/theme/layouts/_default/_markup/render-link.html @@ -0,0 +1,4 @@ +{{ .Text | safeHTML }} diff --git a/blog/themes/theme/layouts/_default/baseof.html b/blog/themes/theme/layouts/_default/baseof.html new file mode 100644 index 0000000..55395cb --- /dev/null +++ b/blog/themes/theme/layouts/_default/baseof.html @@ -0,0 +1,67 @@ + + + + + {{ if .IsHome }}{{ .Site.Title }}{{ else }}{{ .Title }} | {{ .Site.Title }}{{ end }} + + + + + + + + + + + + {{ if isset .Site.Params "twitter" }} + + + + + + {{ end }} + + {{ partial "favicon.html" }} + + + {{ block "styles" . }} {{ end }} + {{ $base_styles_opts := .Scratch.Get "style_opts" | default (dict "src" "scss/pages/about.scss" "dest" "css/about.css") }} + {{ $custom_styles_opts := (dict "src" "scss/custom.scss" "dest" "css/custom.css") }} + + {{ $current_page := . }} + + {{ range (slice $base_styles_opts $custom_styles_opts) }} + {{ $style := resources.Get .src | resources.ExecuteAsTemplate .dest $current_page | toCSS | minify | fingerprint }} + + {{ end }} + + {{ range .AlternativeOutputFormats }} + {{ printf `` .Rel .MediaType.Type .MediaType.Suffix .Permalink $.Site.Title | safeHTML }} + {{ end }} + {{ block "links" . }} {{ end }} + {{ partial "seo-schema.html" .}} + + {{- if not .Site.IsServer -}} + {{ template "_internal/google_analytics_async.html" . }} + {{- end -}} + + + + + {{ partial "burger.html" .}} + + {{ partial "nav.html" .}} + +
+ {{ block "main" . }} {{ end }} +
+ + {{ block "footer" . }} {{ end }} + + {{ $script := resources.Get "js/index.js" | minify | fingerprint }} + + {{ block "scripts" . }} {{ end }} + + + diff --git a/blog/themes/theme/layouts/_default/list.html b/blog/themes/theme/layouts/_default/list.html new file mode 100644 index 0000000..6270004 --- /dev/null +++ b/blog/themes/theme/layouts/_default/list.html @@ -0,0 +1,64 @@ +{{ define "styles" }} + {{ $.Scratch.Set "style_opts" (dict "src" "scss/pages/posts.scss" "dest" "css/posts.css") }} +{{ end }} + +{{ define "main" }} + +{{ $dateFormat := .Site.Params.dateFormat | default "Jan 2 2006" }} +{{ $hasAuthor := false }} +{{- if .Data.Term }} + {{range where (where (where (where site.Pages "Section" "author") ".Params.guest" "!=" true) ".Params.name" "!=" nil) ".Data.Term" "==" $.Data.Term}} + {{$longContent := gt (len .Content) 250}} + {{ $hasAuthor = true }} +
+

{{.Params.name}}

+ {{if .Params.photo }} +
+
+ {{ else }} +
+
+ {{ end }} +
+ {{ .Content }} +
+ {{ end }} +{{ end }} +{{- if (not $hasAuthor) }} +

BLOG

+{{- end }} +
+
+
    + {{ range .Pages }} +
  • +
    + +

    + {{ .Title }} +

    +
    +

    + {{ .Summary }}
    + by + {{ range .Param "author" }} + + {{ . }} + + {{ end }} +

    +
    + {{ partial "tags.html" .}} +
    +
  • + {{ end }} +
+ {{- if not $hasAuthor }} + {{ partial "browse-by-tag.html" .}} + {{- end }} +
+ +{{ end }} diff --git a/blog/themes/theme/layouts/_default/single.html b/blog/themes/theme/layouts/_default/single.html new file mode 100644 index 0000000..1048cf8 --- /dev/null +++ b/blog/themes/theme/layouts/_default/single.html @@ -0,0 +1,59 @@ +{{ define "styles" }} + {{ $.Scratch.Set "style_opts" (dict "src" "scss/pages/post.scss" "dest" "css/post.css") }} +{{ end }} + +{{ define "main" }} + {{ $dateFormat := .Site.Params.dateFormat | default "Jan 2 2006" }} + +
+
+
+
+

{{.Title}}

+ {{ if .Date }} {{ end }} +
+ by + {{ range .Param "author" }} + + {{ . }} + + {{ end }} +
+
+
+ {{ partial "anchored-headings.html" .Content | emojify }} + {{ if or .Params.math .Site.Params.math }} + {{ partial "math.html" . }} + {{ end }} +
+ {{ partial "tags.html" .}} {{ partial "post-pagination.html" .}} + {{ template "_internal/disqus.html" . }} +
+ {{ partial "social-icons.html" .}} +

{{ replace .Site.Copyright "{year}" now.Year }}

+
+
+
+ {{ if .Params.toc }} +
+ {{ if .Site.Params.showPageTitleInTOC }}
{{ .Title }}
{{ end }} + {{ .TableOfContents }} +
+ {{ end }} +
+ +{{ end }} + +{{ define "scripts" }} + {{/* Hardcode a specific prismjs version to avoid a redirect on every page load. */}} + + + {{/* Automatically loads the needed languages to highlight the code blocks. */}} + + + {{ if .Params.toc }} + + {{ end }} + +{{ end }} diff --git a/blog/themes/theme/layouts/about/single.html b/blog/themes/theme/layouts/about/single.html new file mode 100644 index 0000000..ba31d13 --- /dev/null +++ b/blog/themes/theme/layouts/about/single.html @@ -0,0 +1,52 @@ +{{ define "styles" }} + {{ $.Scratch.Set "style_opts" (dict "src" "scss/pages/post.scss" "dest" "css/post.css") }} +{{ end }} + +{{ define "main" }} +
+
+
+
+

{{.Title}}

+
+
+ {{ partial "anchored-headings.html" .Content }} +
+

Humans

+
+ {{range where (where (where site.Pages "Section" "author") ".Params.guest" "!=" true) ".Params.name" "!=" nil}} + {{$longContent := gt (len .Content) 250}} +
+

{{ .Params.name }}

+ {{if .Params.photo }} +
+
+ {{ else }} +
+
+ {{ end }} +
+ {{ .Content }} +
+ {{if $longContent}} + + {{end}} +
+ {{ end }} +
+
+ {{ partial "social-icons.html" .}} +

{{ replace .Site.Copyright "{year}" now.Year }}

+
+
+
+
+ +{{ end }} diff --git a/blog/themes/theme/layouts/index.html b/blog/themes/theme/layouts/index.html new file mode 100644 index 0000000..123bc1f --- /dev/null +++ b/blog/themes/theme/layouts/index.html @@ -0,0 +1,25 @@ +{{ define "styles" }} + {{ $.Scratch.Set "style_opts" (dict "src" "scss/pages/about.scss" "dest" "css/about.css") }} +{{ end }} + +{{ define "main" }} + +
+
+ +

{{ .Params.heading }}.

+ {{ if isset .Params "handle" }} + @{{ .Params.handle }} + {{ end }} +

+ {{ .Params.subheading }} +

+ +

+ {{ .Params.purpose }} +

+ {{ partial "social-icons.html" .}} +
+
+ +{{ end }} diff --git a/blog/themes/theme/layouts/partials/anchored-headings.html b/blog/themes/theme/layouts/partials/anchored-headings.html new file mode 100644 index 0000000..62a5084 --- /dev/null +++ b/blog/themes/theme/layouts/partials/anchored-headings.html @@ -0,0 +1,2 @@ + +{{ . | replaceRE "()" "${1}#${3}" | safeHTML }} diff --git a/blog/themes/theme/layouts/partials/browse-by-tag.html b/blog/themes/theme/layouts/partials/browse-by-tag.html new file mode 100644 index 0000000..65a3550 --- /dev/null +++ b/blog/themes/theme/layouts/partials/browse-by-tag.html @@ -0,0 +1,7 @@ + diff --git a/blog/themes/theme/layouts/partials/burger.html b/blog/themes/theme/layouts/partials/burger.html new file mode 100644 index 0000000..9ff4a46 --- /dev/null +++ b/blog/themes/theme/layouts/partials/burger.html @@ -0,0 +1,7 @@ +
+
+
+
+
+
+
diff --git a/blog/themes/theme/layouts/partials/favicon.html b/blog/themes/theme/layouts/partials/favicon.html new file mode 100644 index 0000000..4f6aa07 --- /dev/null +++ b/blog/themes/theme/layouts/partials/favicon.html @@ -0,0 +1 @@ + diff --git a/blog/themes/theme/layouts/partials/math.html b/blog/themes/theme/layouts/partials/math.html new file mode 100644 index 0000000..0e79917 --- /dev/null +++ b/blog/themes/theme/layouts/partials/math.html @@ -0,0 +1,11 @@ + + + + \ No newline at end of file diff --git a/blog/themes/theme/layouts/partials/nav.html b/blog/themes/theme/layouts/partials/nav.html new file mode 100644 index 0000000..d3b9bcd --- /dev/null +++ b/blog/themes/theme/layouts/partials/nav.html @@ -0,0 +1,11 @@ + diff --git a/blog/themes/theme/layouts/partials/pagination.html b/blog/themes/theme/layouts/partials/pagination.html new file mode 100644 index 0000000..5b1d5d9 --- /dev/null +++ b/blog/themes/theme/layouts/partials/pagination.html @@ -0,0 +1,7 @@ +
+ {{ if .Paginator.HasPrev }} + + {{ end }} {{ if .Paginator.HasNext }} + + {{ end }} +
diff --git a/blog/themes/theme/layouts/partials/post-pagination.html b/blog/themes/theme/layouts/partials/post-pagination.html new file mode 100644 index 0000000..5b44c55 --- /dev/null +++ b/blog/themes/theme/layouts/partials/post-pagination.html @@ -0,0 +1,15 @@ + diff --git a/blog/themes/theme/layouts/partials/seo-schema.html b/blog/themes/theme/layouts/partials/seo-schema.html new file mode 100644 index 0000000..8ee4cfa --- /dev/null +++ b/blog/themes/theme/layouts/partials/seo-schema.html @@ -0,0 +1,42 @@ + + + diff --git a/blog/themes/theme/layouts/partials/social-icons.html b/blog/themes/theme/layouts/partials/social-icons.html new file mode 100644 index 0000000..bdcadbf --- /dev/null +++ b/blog/themes/theme/layouts/partials/social-icons.html @@ -0,0 +1,15 @@ +{{ $currentPage := . }} +{{ $icons := .Site.Params.iconOrder | default (slice "Twitter" "GitHub" "Email" "Mastodon" "Facebook" "GitLab" "Instagram" "LinkedIn" "YouTube") }} + + diff --git a/blog/themes/theme/layouts/partials/tags.html b/blog/themes/theme/layouts/partials/tags.html new file mode 100644 index 0000000..3ab8030 --- /dev/null +++ b/blog/themes/theme/layouts/partials/tags.html @@ -0,0 +1,12 @@ +{{ $taxo := "tags" }} +{{ with .Param $taxo }} +
    + {{ range $index, $tag := . }} + {{ with $.Site.GetPage (printf "/%s/%s" $taxo $tag) -}} +
  • + {{ $tag }} +
  • + {{- end -}} + {{- end -}} +
+{{ end }} diff --git a/blog/themes/theme/layouts/taxonomy/tag.html b/blog/themes/theme/layouts/taxonomy/tag.html new file mode 100644 index 0000000..7ff5e12 --- /dev/null +++ b/blog/themes/theme/layouts/taxonomy/tag.html @@ -0,0 +1,37 @@ +{{ define "styles" }} + {{ $.Scratch.Set "style_opts" (dict "src" "scss/pages/tags.scss" "dest" "css/tags.css") }} +{{ end }} + +{{ define "main" }} + {{ $dateFormat := .Site.Params.dateFormat | default "Jan 2 2006" }} + +
+
+ {{ i18n "all_posts" }}/ +

{{ .Title }}

+
+
    + {{ range .Data.Pages }} +
  • +
    + +

    + {{ .Title }} +

    +
    +

    + {{ .Summary }}
    + by + {{ range .Param "author" }} + + {{ . }} + + {{ end }} +

    +
    +
    +
  • + {{ end }} +
+
+{{ end }} diff --git a/blog/themes/theme/package.json b/blog/themes/theme/package.json new file mode 100644 index 0000000..5e8eceb --- /dev/null +++ b/blog/themes/theme/package.json @@ -0,0 +1,36 @@ +{ + "name": "hugo-theme-codex", + "version": "1.5.0", + "author": "Jake Wiesler", + "description": "A minimal blog theme for Hugo", + "license": "MIT", + "scripts": { + "develop": "hugo server -s ./exampleSite/", + "build:example": "hugo -s ./exampleSite/", + "format": "prettier ./ --write" + }, + "husky": { + "hooks": { + "pre-commit": "lint-staged" + } + }, + "devDependencies": { + "husky": "^4.2.5", + "lint-staged": ">=10", + "prettier": "^2.0.5" + }, + "browserslist": [ + "last 2 version", + "> 2%" + ], + "homepage": "https://github.com/jakewies/hugo-theme-codex", + "bugs": "https://github.com/jakewies/hugo-theme-codex/issues", + "keywords": [ + "hugo", + "blog", + "go" + ], + "lint-staged": { + "*.{js,css,scss}": "prettier --write" + } +} diff --git a/blog/themes/theme/prettier.config.js b/blog/themes/theme/prettier.config.js new file mode 100644 index 0000000..d174dc0 --- /dev/null +++ b/blog/themes/theme/prettier.config.js @@ -0,0 +1,13 @@ +module.exports = { + arrowParens: "avoid", + bracketSpacing: false, + endOfLine: "lf", + htmlWhitespaceSensitivity: "css", + printWidth: 80, + proseWrap: "always", + semi: true, + singleQuote: false, + tabWidth: 2, + trailingComma: "all", + useTabs: false, +}; diff --git a/blog/themes/theme/static/favicon.ico b/blog/themes/theme/static/favicon.ico new file mode 100644 index 0000000..1ba6200 Binary files /dev/null and b/blog/themes/theme/static/favicon.ico differ diff --git a/blog/themes/theme/static/js/table-of-contents.js b/blog/themes/theme/static/js/table-of-contents.js new file mode 100644 index 0000000..911adbf --- /dev/null +++ b/blog/themes/theme/static/js/table-of-contents.js @@ -0,0 +1,17 @@ +window.addEventListener('DOMContentLoaded', () => { + const observer = new IntersectionObserver(entries => { + 
entries.forEach(entry => { + const id = entry.target.getAttribute('id'); + if (entry.intersectionRatio > 0) { + document.querySelector(`#TableOfContents ul li a[href="#${id}"]`).classList.add('active'); + } else { + document.querySelector(`#TableOfContents ul li a[href="#${id}"]`).classList.remove('active'); + } + }); + }); + + // Track all headers that have an `id` applied + document.querySelectorAll('article h3[id], article h2[id]').forEach((section) => { + observer.observe(section); + }); +}); diff --git a/blog/themes/theme/static/svg/chevron-left.svg b/blog/themes/theme/static/svg/chevron-left.svg new file mode 100644 index 0000000..3187d47 --- /dev/null +++ b/blog/themes/theme/static/svg/chevron-left.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/theme/static/svg/email.svg b/blog/themes/theme/static/svg/email.svg new file mode 100644 index 0000000..2af169e --- /dev/null +++ b/blog/themes/theme/static/svg/email.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/theme/static/svg/facebook.svg b/blog/themes/theme/static/svg/facebook.svg new file mode 100644 index 0000000..2570f56 --- /dev/null +++ b/blog/themes/theme/static/svg/facebook.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/theme/static/svg/github.svg b/blog/themes/theme/static/svg/github.svg new file mode 100644 index 0000000..5426bf4 --- /dev/null +++ b/blog/themes/theme/static/svg/github.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/theme/static/svg/gitlab.svg b/blog/themes/theme/static/svg/gitlab.svg new file mode 100644 index 0000000..85d54a1 --- /dev/null +++ b/blog/themes/theme/static/svg/gitlab.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/theme/static/svg/instagram.svg b/blog/themes/theme/static/svg/instagram.svg new file mode 100644 index 0000000..9fdb8e3 --- /dev/null +++ b/blog/themes/theme/static/svg/instagram.svg @@ -0,0 +1 @@ + \ No newline at end of file diff 
--git a/blog/themes/theme/static/svg/linkedin.svg b/blog/themes/theme/static/svg/linkedin.svg new file mode 100644 index 0000000..3953109 --- /dev/null +++ b/blog/themes/theme/static/svg/linkedin.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/theme/static/svg/mastodon.svg b/blog/themes/theme/static/svg/mastodon.svg new file mode 100644 index 0000000..13a92c8 --- /dev/null +++ b/blog/themes/theme/static/svg/mastodon.svg @@ -0,0 +1,63 @@ + + + + + + image/svg+xml + + + + + + + + + + diff --git a/blog/themes/theme/static/svg/twitter.svg b/blog/themes/theme/static/svg/twitter.svg new file mode 100644 index 0000000..13c97b9 --- /dev/null +++ b/blog/themes/theme/static/svg/twitter.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/theme/static/svg/youtube.svg b/blog/themes/theme/static/svg/youtube.svg new file mode 100644 index 0000000..c482438 --- /dev/null +++ b/blog/themes/theme/static/svg/youtube.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/blog/themes/theme/theme.toml b/blog/themes/theme/theme.toml new file mode 100644 index 0000000..3e81a24 --- /dev/null +++ b/blog/themes/theme/theme.toml @@ -0,0 +1,15 @@ +# theme.toml template for a Hugo theme +# See https://github.com/gohugoio/hugoThemes#themetoml for an example + +name = "Codex" +license = "MIT" +licenselink = "https://github.com/jakewies/hugo-theme-codex/blob/master/LICENSE.md" +description = "A minimal blog theme for hugo" +homepage = "https://github.com/jakewies/hugo-theme-codex" +tags = ["website", "starter", "blog"] +features = ["blog"] +min_version = "0.72.0" + +[author] + name = "Jake Wiesler" + homepage = "https://jakewiesler.com" diff --git a/blog/themes/theme/yarn.lock b/blog/themes/theme/yarn.lock new file mode 100644 index 0000000..2e56735 --- /dev/null +++ b/blog/themes/theme/yarn.lock @@ -0,0 +1,860 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +"@babel/code-frame@^7.0.0": + version "7.10.3" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.3.tgz#324bcfd8d35cd3d47dae18cde63d752086435e9a" + integrity sha512-fDx9eNW0qz0WkUeqL6tXEXzVlPh6Y5aCDEZesl0xBGA8ndRukX91Uk44ZqnkECp01NAZUdCAl+aiQNGi0k88Eg== + dependencies: + "@babel/highlight" "^7.10.3" + +"@babel/helper-validator-identifier@^7.10.3": + version "7.10.3" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.3.tgz#60d9847f98c4cea1b279e005fdb7c28be5412d15" + integrity sha512-bU8JvtlYpJSBPuj1VUmKpFGaDZuLxASky3LhaKj3bmpSTY6VWooSM8msk+Z0CZoErFye2tlABF6yDkT3FOPAXw== + +"@babel/highlight@^7.10.3": + version "7.10.3" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.10.3.tgz#c633bb34adf07c5c13156692f5922c81ec53f28d" + integrity sha512-Ih9B/u7AtgEnySE2L2F0Xm0GaM729XqqLfHkalTsbjXGyqmf/6M0Cu0WpvqueUlW+xk88BHw9Nkpj49naU+vWw== + dependencies: + "@babel/helper-validator-identifier" "^7.10.3" + chalk "^2.0.0" + js-tokens "^4.0.0" + +"@types/color-name@^1.1.1": + version "1.1.1" + resolved "https://registry.yarnpkg.com/@types/color-name/-/color-name-1.1.1.tgz#1c1261bbeaa10a8055bbc5d8ab84b7b2afc846a0" + integrity sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ== + +"@types/minimatch@^3.0.3": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.3.tgz#3dca0e3f33b200fc7d1139c0cd96c1268cadfd9d" + integrity sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA== + +"@types/parse-json@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" + integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA== + +aggregate-error@^3.0.0: + version "3.0.1" + resolved 
"https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.0.1.tgz#db2fe7246e536f40d9b5442a39e117d7dd6a24e0" + integrity sha512-quoaXsZ9/BLNae5yiNoUz+Nhkwz83GhWwtYFglcjEQB2NDHCIpApbqXxIFnm4Pq/Nvhrsq5sYJFyohrrxnTGAA== + dependencies: + clean-stack "^2.0.0" + indent-string "^4.0.0" + +ansi-colors@^3.2.1: + version "3.2.4" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.4.tgz#e3a3da4bfbae6c86a9c285625de124a234026fbf" + integrity sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA== + +ansi-escapes@^4.3.0: + version "4.3.1" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.1.tgz#a5c47cc43181f1f38ffd7076837700d395522a61" + integrity sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA== + dependencies: + type-fest "^0.11.0" + +ansi-regex@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.0.tgz#388539f55179bf39339c81af30a654d69f87cb75" + integrity sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg== + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +ansi-styles@^4.0.0, ansi-styles@^4.1.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.2.1.tgz#90ae75c424d008d2624c5bf29ead3177ebfcf359" + integrity sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA== + dependencies: + "@types/color-name" "^1.1.1" + color-convert "^2.0.1" + +array-differ@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/array-differ/-/array-differ-3.0.0.tgz#3cbb3d0f316810eafcc47624734237d6aee4ae6b" + integrity 
sha512-THtfYS6KtME/yIAhKjZ2ul7XI96lQGHRputJQHO80LAWQnuGP4iCIN8vdMRboGbIEYBwU33q8Tch1os2+X0kMg== + +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +arrify@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-2.0.1.tgz#c9655e9331e0abcd588d2a7cad7e9956f66701fa" + integrity sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug== + +astral-regex@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/astral-regex/-/astral-regex-2.0.0.tgz#483143c567aeed4785759c0865786dc77d7d2e31" + integrity sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ== + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + dependencies: + fill-range "^7.0.1" + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity 
sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +chalk@^2.0.0, chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.0.tgz#4e14870a618d9e2edd97dd8345fd9d9dc315646a" + integrity sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +ci-info@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" + integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== + +clean-stack@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" + integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== + +cli-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" + integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== + dependencies: + restore-cursor "^3.1.0" + +cli-truncate@2.1.0, cli-truncate@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cli-truncate/-/cli-truncate-2.1.0.tgz#c39e28bf05edcde5be3b98992a22deed5a2b93c7" + integrity sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg== + dependencies: + slice-ansi "^3.0.0" + string-width "^4.2.0" + +color-convert@^1.9.0: + version "1.9.3" 
+ resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-convert@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= + +color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +commander@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-5.1.0.tgz#46abbd1652f8e059bddaef99bbdcb2ad9cf179ae" + integrity sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg== + +compare-versions@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/compare-versions/-/compare-versions-3.6.0.tgz#1a5689913685e5a87637b8d3ffca75514ec41d62" + integrity sha512-W6Af2Iw1z4CB7q4uU4hv646dW9GQuBM+YpC0UvUCWSD8w90SJjp+ujJuXaEMtAXBtSqGfMPuFOVn4/+FlaqfBA== + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= + +cosmiconfig@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz#da4fee853c52f6b1e6935f41c1a2fc50bd4a9982" + integrity 
sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.1.0" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.7.2" + +cross-spawn@^7.0.0: + version "7.0.3" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" + integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +debug@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" + integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== + dependencies: + ms "^2.1.1" + +dedent@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/dedent/-/dedent-0.7.0.tgz#2495ddbaf6eb874abb0e1be9df22d2e5a544326c" + integrity sha1-JJXduvbrh0q7Dhvp3yLS5aVEMmw= + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +end-of-stream@^1.1.0: + version "1.4.4" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" + integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== + dependencies: + once "^1.4.0" + +enquirer@^2.3.5: + version "2.3.5" + resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.5.tgz#3ab2b838df0a9d8ab9e7dff235b0e8712ef92381" + integrity sha512-BNT1C08P9XD0vNg3J475yIUG+mVdp9T6towYFHUv897X0KoHBjB1shyrNmhmtHWKP17iSWgo7Gqh7BBuzLZMSA== + dependencies: + ansi-colors "^3.2.1" + +error-ex@^1.3.1: + version "1.3.2" + resolved 
"https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== + dependencies: + is-arrayish "^0.2.1" + +escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= + +execa@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-2.1.0.tgz#e5d3ecd837d2a60ec50f3da78fd39767747bbe99" + integrity sha512-Y/URAVapfbYy2Xp/gb6A0E7iR8xeqOCXsuuaoMn7A5PzrXUK84E1gyiEfq0wQd/GHA6GsoHWwhNq8anb0mleIw== + dependencies: + cross-spawn "^7.0.0" + get-stream "^5.0.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^3.0.0" + onetime "^5.1.0" + p-finally "^2.0.0" + signal-exit "^3.0.2" + strip-final-newline "^2.0.0" + +execa@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/execa/-/execa-4.0.2.tgz#ad87fb7b2d9d564f70d2b62d511bee41d5cbb240" + integrity sha512-QI2zLa6CjGWdiQsmSkZoGtDx2N+cQIGb3yNolGTdjSQzydzLgYYf8LRuagp7S7fPimjcrzUDSUFd/MgzELMi4Q== + dependencies: + cross-spawn "^7.0.0" + get-stream "^5.0.0" + human-signals "^1.1.1" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.0" + onetime "^5.1.0" + signal-exit "^3.0.2" + strip-final-newline "^2.0.0" + +figures@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" + integrity sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg== + dependencies: + escape-string-regexp "^1.0.5" + +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity 
sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== + dependencies: + to-regex-range "^5.0.1" + +find-up@^4.0.0, find-up@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +find-versions@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/find-versions/-/find-versions-3.2.0.tgz#10297f98030a786829681690545ef659ed1d254e" + integrity sha512-P8WRou2S+oe222TOCHitLy8zj+SIsVJh52VP4lvXkaFVnOFFdoWv1H1Jjvel1aI6NCFOAaeAVm8qrI0odiLcww== + dependencies: + semver-regex "^2.0.0" + +get-own-enumerable-property-symbols@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz#b5fde77f22cbe35f390b4e089922c50bce6ef664" + integrity sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g== + +get-stream@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-5.1.0.tgz#01203cdc92597f9b909067c3e656cc1f4d3c4dc9" + integrity sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw== + dependencies: + pump "^3.0.0" + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +human-signals@^1.1.1: + version "1.1.1" + resolved 
"https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" + integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== + +husky@^4.2.5: + version "4.2.5" + resolved "https://registry.yarnpkg.com/husky/-/husky-4.2.5.tgz#2b4f7622673a71579f901d9885ed448394b5fa36" + integrity sha512-SYZ95AjKcX7goYVZtVZF2i6XiZcHknw50iXvY7b0MiGoj5RwdgRQNEHdb+gPDPCXKlzwrybjFjkL6FOj8uRhZQ== + dependencies: + chalk "^4.0.0" + ci-info "^2.0.0" + compare-versions "^3.6.0" + cosmiconfig "^6.0.0" + find-versions "^3.2.0" + opencollective-postinstall "^2.0.2" + pkg-dir "^4.2.0" + please-upgrade-node "^3.2.0" + slash "^3.0.0" + which-pm-runs "^1.0.0" + +ignore@^5.1.4: + version "5.1.8" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" + integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== + +import-fresh@^3.1.0: + version "3.2.1" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.2.1.tgz#633ff618506e793af5ac91bf48b72677e15cbe66" + integrity sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ== + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + integrity sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0= + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + 
integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-obj@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f" + integrity sha1-PkcprB9f3gJc19g6iW2rn09n2w8= + +is-regexp@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-regexp/-/is-regexp-1.0.0.tgz#fd2d883545c46bac5a633e7b9a09e87fa2cb5069" + integrity sha1-/S2INUXEa6xaYz57mgnof6LLUGk= + +is-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.0.tgz#bde9c32680d6fae04129d6ac9d921ce7815f78e3" + integrity sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw== + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= + +js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +json-parse-better-errors@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw== + +lines-and-columns@^1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.1.6.tgz#1c00c743b433cd0a4e80758f7b64a57440d9ff00" + integrity 
sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA= + +lint-staged@>=10: + version "10.2.11" + resolved "https://registry.yarnpkg.com/lint-staged/-/lint-staged-10.2.11.tgz#713c80877f2dc8b609b05bc59020234e766c9720" + integrity sha512-LRRrSogzbixYaZItE2APaS4l2eJMjjf5MbclRZpLJtcQJShcvUzKXsNeZgsLIZ0H0+fg2tL4B59fU9wHIHtFIA== + dependencies: + chalk "^4.0.0" + cli-truncate "2.1.0" + commander "^5.1.0" + cosmiconfig "^6.0.0" + debug "^4.1.1" + dedent "^0.7.0" + enquirer "^2.3.5" + execa "^4.0.1" + listr2 "^2.1.0" + log-symbols "^4.0.0" + micromatch "^4.0.2" + normalize-path "^3.0.0" + please-upgrade-node "^3.2.0" + string-argv "0.3.1" + stringify-object "^3.3.0" + +listr2@^2.1.0: + version "2.1.8" + resolved "https://registry.yarnpkg.com/listr2/-/listr2-2.1.8.tgz#8af7ebc70cdbe866ddbb6c80909142bd45758f1f" + integrity sha512-Op+hheiChfAphkJ5qUxZtHgyjlX9iNnAeFS/S134xw7mVSg0YVrQo1IY4/K+ElY6XgOPg2Ij4z07urUXR+YEew== + dependencies: + chalk "^4.0.0" + cli-truncate "^2.1.0" + figures "^3.2.0" + indent-string "^4.0.0" + log-update "^4.0.0" + p-map "^4.0.0" + rxjs "^6.5.5" + through "^2.3.8" + +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + +log-symbols@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.0.0.tgz#69b3cc46d20f448eccdb75ea1fa733d9e821c920" + integrity sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA== + dependencies: + chalk "^4.0.0" + +log-update@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/log-update/-/log-update-4.0.0.tgz#589ecd352471f2a1c0c570287543a64dfd20e0a1" + integrity sha512-9fkkDevMefjg0mmzWFBW8YkFP91OrizzkW3diF7CpG+S2EYdy4+TVfGwz1zeF8x7hCx1ovSPTOE9Ngib74qqUg== + dependencies: + ansi-escapes "^4.3.0" + cli-cursor 
"^3.1.0" + slice-ansi "^4.0.0" + wrap-ansi "^6.2.0" + +merge-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" + integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== + +micromatch@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.2.tgz#4fcb0999bf9fbc2fcbdd212f6d629b9a56c39259" + integrity sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q== + dependencies: + braces "^3.0.1" + picomatch "^2.0.5" + +mimic-fn@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" + integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== + +minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + +mri@^1.1.4: + version "1.1.5" + resolved "https://registry.yarnpkg.com/mri/-/mri-1.1.5.tgz#ce21dba2c69f74a9b7cf8a1ec62307e089e223e0" + integrity sha512-d2RKzMD4JNyHMbnbWnznPaa8vbdlq/4pNZ3IgdaGrVbBhebBsGUUE/6qorTMYNS6TwuH3ilfOlD2bf4Igh8CKg== + +ms@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +multimatch@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/multimatch/-/multimatch-4.0.0.tgz#8c3c0f6e3e8449ada0af3dd29efb491a375191b3" + integrity sha512-lDmx79y1z6i7RNx0ZGCPq1bzJ6ZoDDKbvh7jxr9SJcWLkShMzXrHbYVpTdnhNM5MXpDUxCQ4DgqVttVXlBgiBQ== + dependencies: + "@types/minimatch" 
"^3.0.3" + array-differ "^3.0.0" + array-union "^2.1.0" + arrify "^2.0.1" + minimatch "^3.0.4" + +normalize-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== + +npm-run-path@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-3.1.0.tgz#7f91be317f6a466efed3c9f2980ad8a4ee8b0fa5" + integrity sha512-Dbl4A/VfiVGLgQv29URL9xshU8XDY1GeLy+fsaZ1AA8JDSfjvr5P5+pzRbWqRSBxk6/DW7MIh8lTM/PaGnP2kg== + dependencies: + path-key "^3.0.0" + +npm-run-path@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" + integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== + dependencies: + path-key "^3.0.0" + +once@^1.3.1, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= + dependencies: + wrappy "1" + +onetime@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.0.tgz#fff0f3c91617fe62bb50189636e99ac8a6df7be5" + integrity sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q== + dependencies: + mimic-fn "^2.1.0" + +opencollective-postinstall@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz#7a0fff978f6dbfa4d006238fbac98ed4198c3259" + integrity sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q== + +p-finally@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-2.0.1.tgz#bd6fcaa9c559a096b680806f4d657b3f0f240561" + integrity 
sha512-vpm09aKwq6H9phqRQzecoDpD8TmVyGw70qmWlyq5onxY7tqyTTFVvxMykxQSQKILBSFlbXpypIw2T1Ml7+DDtw== + +p-limit@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" + +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" + +p-map@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-4.0.0.tgz#bb2f95a5eda2ec168ec9274e06a747c3e2904d2b" + integrity sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ== + dependencies: + aggregate-error "^3.0.0" + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== + dependencies: + callsites "^3.0.0" + +parse-json@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.0.0.tgz#73e5114c986d143efa3712d4ea24db9a4266f60f" + integrity sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw== + dependencies: + "@babel/code-frame" "^7.0.0" + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + lines-and-columns "^1.1.6" + +path-exists@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== + +path-key@^3.0.0, path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +picomatch@^2.0.5: + version "2.2.2" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad" + integrity sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg== + +pkg-dir@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" + integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== + dependencies: + find-up "^4.0.0" + +please-upgrade-node@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/please-upgrade-node/-/please-upgrade-node-3.2.0.tgz#aeddd3f994c933e4ad98b99d9a556efa0e2fe942" + integrity sha512-gQR3WpIgNIKwBMVLkpMUeR3e1/E1y42bqDQZfql+kDeXd8COYfM8PQA4X6y7a8u9Ua9FHmsrrmirW2vHs45hWg== + dependencies: + semver-compare "^1.0.0" + +prettier@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.0.5.tgz#d6d56282455243f2f92cc1716692c08aa31522d4" + integrity sha512-7PtVymN48hGcO4fGjybyBSIWDsLU4H4XlvOHfq91pz9kkGlonzwTfYkaIEwiRg/dAJF9YlbsduBAgtYLi+8cFg== + +pretty-quick@^2.0.1: + version "2.0.1" + resolved 
"https://registry.yarnpkg.com/pretty-quick/-/pretty-quick-2.0.1.tgz#417ee605ade98ecc686e72f63b5d28a2c35b43e9" + integrity sha512-y7bJt77XadjUr+P1uKqZxFWLddvj3SKY6EU4BuQtMxmmEFSMpbN132pUWdSG1g1mtUfO0noBvn7wBf0BVeomHg== + dependencies: + chalk "^2.4.2" + execa "^2.1.0" + find-up "^4.1.0" + ignore "^5.1.4" + mri "^1.1.4" + multimatch "^4.0.0" + +pump@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" + integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +restore-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" + integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== + dependencies: + onetime "^5.1.0" + signal-exit "^3.0.2" + +rxjs@^6.5.5: + version "6.5.5" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.5.5.tgz#c5c884e3094c8cfee31bf27eb87e54ccfc87f9ec" + integrity sha512-WfQI+1gohdf0Dai/Bbmk5L5ItH5tYqm3ki2c5GdWhKjalzjg93N3avFjVStyZZz+A2Em+ZxKH5bNghw9UeylGQ== + dependencies: + tslib "^1.9.0" + +semver-compare@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/semver-compare/-/semver-compare-1.0.0.tgz#0dee216a1c941ab37e9efb1788f6afc5ff5537fc" + integrity sha1-De4hahyUGrN+nvsXiPavxf9VN/w= + +semver-regex@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/semver-regex/-/semver-regex-2.0.0.tgz#a93c2c5844539a770233379107b38c7b4ac9d338" + integrity 
sha512-mUdIBBvdn0PLOeP3TEkMH7HHeUP3GjsXCwKarjv/kGmUFOYg1VqEemKhoQpWMu6X2I8kHeuVdGibLGkVK+/5Qw== + +shebang-command@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" + +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +signal-exit@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" + integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== + +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +slice-ansi@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-3.0.0.tgz#31ddc10930a1b7e0b67b08c96c2f49b77a789787" + integrity sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ== + dependencies: + ansi-styles "^4.0.0" + astral-regex "^2.0.0" + is-fullwidth-code-point "^3.0.0" + +slice-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-4.0.0.tgz#500e8dd0fd55b05815086255b3195adf2a45fe6b" + integrity sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ== + dependencies: + ansi-styles "^4.0.0" + astral-regex "^2.0.0" + is-fullwidth-code-point "^3.0.0" + +string-argv@0.3.1: + version "0.3.1" + resolved 
"https://registry.yarnpkg.com/string-argv/-/string-argv-0.3.1.tgz#95e2fbec0427ae19184935f816d74aaa4c5c19da" + integrity sha512-a1uQGz7IyVy9YwhqjZIZu1c8JO8dNIe20xBmSS6qu9kv++k3JGzCVmprbNN5Kn+BgzD5E7YYwg1CcjuJMRNsvg== + +string-width@^4.1.0, string-width@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.0.tgz#952182c46cc7b2c313d1596e623992bd163b72b5" + integrity sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.0" + +stringify-object@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-3.3.0.tgz#703065aefca19300d3ce88af4f5b3956d7556629" + integrity sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw== + dependencies: + get-own-enumerable-property-symbols "^3.0.0" + is-obj "^1.0.1" + is-regexp "^1.0.0" + +strip-ansi@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532" + integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== + dependencies: + ansi-regex "^5.0.0" + +strip-final-newline@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" + integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== + +supports-color@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-color@^7.1.0: + version "7.1.0" + resolved 
"https://registry.yarnpkg.com/supports-color/-/supports-color-7.1.0.tgz#68e32591df73e25ad1c4b49108a2ec507962bfd1" + integrity sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g== + dependencies: + has-flag "^4.0.0" + +through@^2.3.8: + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= + +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +tslib@^1.9.0: + version "1.13.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.13.0.tgz#c881e13cc7015894ed914862d276436fa9a47043" + integrity sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q== + +type-fest@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.11.0.tgz#97abf0872310fed88a5c466b25681576145e33f1" + integrity sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ== + +which-pm-runs@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/which-pm-runs/-/which-pm-runs-1.0.0.tgz#670b3afbc552e0b55df6b7780ca74615f23ad1cb" + integrity sha1-Zws6+8VS4LVd9rd4DKdGFfI60cs= + +which@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== + dependencies: + isexe "^2.0.0" + +wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity 
sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= + +yaml@^1.7.2: + version "1.10.0" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.0.tgz#3b593add944876077d4d683fee01081bd9fff31e" + integrity sha512-yr2icI4glYaNG+KWONODapy2/jDdMSDnrONSjblABjD9B4Z5LgiircSt8m8sRZFNi08kG9Sm0uSHtEmP3zaEGg== diff --git a/cncf/apisnoop/35-Filter-out-deprecated-api-fields-from-coverage-info.org b/cncf/apisnoop/35-Filter-out-deprecated-api-fields-from-coverage-info.org index 33bea6a..04085eb 100644 --- a/cncf/apisnoop/35-Filter-out-deprecated-api-fields-from-coverage-info.org +++ b/cncf/apisnoop/35-Filter-out-deprecated-api-fields-from-coverage-info.org @@ -647,6 +647,5 @@ There were quite a few definitions dropped, but again this shouldn't affect test # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# org-confirm-babel-evaluate: nil # End: diff --git a/cncf/apisnoop/apisnoop.org b/cncf/apisnoop/apisnoop.org index 41e682d..91287e0 100644 --- a/cncf/apisnoop/apisnoop.org +++ b/cncf/apisnoop/apisnoop.org @@ -17,6 +17,5 @@ API Accesses # eval: (setq socket (concat "/tmp/" user-login-name "." 
(file-name-base buffer-file-name) ".iisocket")) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/cncf/apisnoop/arch.org b/cncf/apisnoop/arch.org index f54c58a..0e29004 100644 --- a/cncf/apisnoop/arch.org +++ b/cncf/apisnoop/arch.org @@ -528,5 +528,4 @@ see-also: # eval: (require (quote ob-go)) # #eval: (require (quote ob-tmux)) # #eval: (require (quote ob-tmate)) -# org-confirm-babel-evaluate: nil # End: diff --git a/cncf/apisnoop/ci.org b/cncf/apisnoop/ci.org index b744a69..d5c4c78 100644 --- a/cncf/apisnoop/ci.org +++ b/cncf/apisnoop/ci.org @@ -928,6 +928,5 @@ command terminated with exit code 1 # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/cncf/apisnoop/example-issue.org b/cncf/apisnoop/example-issue.org index ebf6a93..6cd9822 100644 --- a/cncf/apisnoop/example-issue.org +++ b/cncf/apisnoop/example-issue.org @@ -289,7 +289,5 @@ At this point I was unsure how to get at the keys you see here: # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# org-confirm-babel-evaluate: nil -# socket: "/tmp/mysocket" # End: diff --git a/cncf/apisnoop/netlify.org b/cncf/apisnoop/netlify.org index 6abdbc0..b0c4380 100644 --- a/cncf/apisnoop/netlify.org +++ b/cncf/apisnoop/netlify.org @@ -455,7 +455,5 @@ cd $GOPATH/src/k8s.io/kubernetes # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t -# org-src-preserve-indentation: t # End: diff --git a/cncf/apisnoop/packet.org b/cncf/apisnoop/packet.org index 415d81a..2814bad 100644 --- a/cncf/apisnoop/packet.org +++ b/cncf/apisnoop/packet.org @@ -47,6 +47,5 @@ tmpdir creates folder in the ramdisk on 
load # org-babel-tmate-session-prefix: "" # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/cncf/ci/developing-on-deck.org b/cncf/ci/developing-on-deck.org index 3dba635..79610a2 100644 --- a/cncf/ci/developing-on-deck.org +++ b/cncf/ci/developing-on-deck.org @@ -101,6 +101,5 @@ Now we have a local Deck instance which can be develop on in quick iteration loo # eval: (xclip-mode 0) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/cncf/ci/prow.org b/cncf/ci/prow.org index 7e5e8c5..497870b 100644 --- a/cncf/ci/prow.org +++ b/cncf/ci/prow.org @@ -1842,8 +1842,6 @@ Visit http://localhost:8080 # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t -# org-src-preserve-indentation: t # End: diff --git a/cncf/crosscloudci/cross-cloud/auditlogs.org b/cncf/crosscloudci/cross-cloud/auditlogs.org index 3a4d36b..bff8ba9 100644 --- a/cncf/crosscloudci/cross-cloud/auditlogs.org +++ b/cncf/crosscloudci/cross-cloud/auditlogs.org @@ -251,7 +251,6 @@ Manually run vppswitch install # eval: (xclip-mode 0) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/cncf/pairing/globant.org b/cncf/pairing/globant.org index bb08f12..2d2f6f1 100644 --- a/cncf/pairing/globant.org +++ b/cncf/pairing/globant.org @@ -189,7 +189,6 @@ time $PREFIX/e2e.test \ # eval: (xclip-mode 0) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/docs.old/.dir-locals.el b/docs.old/.dir-locals.el new file mode 100644 index 
0000000..45c2139 --- /dev/null +++ b/docs.old/.dir-locals.el @@ -0,0 +1,361 @@ +;;; Directory Local Variables +;;; For more information see (info "(emacs) Directory Variables") +;; ( + ;; (org-mode + ;; (org-babel-tmate-session-prefix . "") + ;; (org-babel-tmate-default-window-name . "main") + ;; (org-confirm-babel-evaluate . nil) + ;; (org-use-property-inheritance . t) + ;; (org-file-dir . (file-name-directory buffer-file-name)) + ;; (eval + ;; . + ;; (progn + ;; ;; (let ((socket-arg (concat ":socket " "FLOOPIE" )))) + ;; ;; (set (make-local-variable 'tmpdir) + ;; ;; (make-temp-file (concat "/dev/shm/" user-buffer "-") t)) + ;; (set (make-local-variable 'ssh-user) + ;; "pair") + ;; ;; user-login-name) + ;; ;; might be nice to set this as a global property in the org file + ;; (set (make-local-variable 'ssh-host) + ;; "ii.cncf.ci") + ;; (set (make-local-variable 'ssh-user-host) + ;; (concat ssh-user "@" ssh-host)) + ;; (set (make-local-variable 'time-stamp-zone) + ;; "Pacific/Auckland") + ;; (set (make-local-variable 'time-stamp-pattern) + ;; ;; https://www.emacswiki.org/emacs/TimeStamp + ;; "10/#+UPDATED: needs time-local formatted regexp") + ;; (set (make-local-variable 'user-buffer) + ;; (concat user-login-name "." 
(file-name-base load-file-name))) + ;; (set (make-local-variable 'socket) + ;; (concat "/tmp/" user-buffer ".target.iisocket")) + ;; (set (make-local-variable 'socket-param) + ;; (concat ":sockets " socket)) + ;; (set (make-local-variable 'item-str) + ;; "(nth 4 (org-heading-components))") + ;; (set (make-local-variable 'togetherly-port) + ;; (+ (random 60000) 1024)) + ;; (set (make-local-variable 'org-file-properties) + ;; (list + ;; (cons 'header-args:tmate + ;; (concat + ;; ":noweb yes" + ;; " :noweb-ref " item-str + ;; " :comments org" + ;; " :eval no-export" + ;; " :results silent " + ;; " :session (concat user-login-name \":main\" )" + ;; ;; " :session (concat user-login-name \":\" " "main" ")" + ;; ;; " :session (concat user-login-name \":\" " item-str ")" + ;; " :socket " socket + ;; " :window " user-login-name + ;; " :terminal sakura" + ;; " :exports code" + ;; ;; If you want each tmate command to run from a particular directory + ;; ;; " :prologue (concat \"cd \" ssh-dir \"\n\")" + ;; ;; " :prologue (concat "cd " org-file-dir "\n") )) + ;; )) + ;; (cons 'header-args:emacs-lisp + ;; (concat + ;; ":noweb yes" + ;; " :noweb-ref " item-str + ;; " :comments org" + ;; " :eval no-export" + ;; " :results code" + ;; " :exports both" + ;; )) + ;; (cons 'header-args:elisp + ;; (concat + ;; ":noweb yes" + ;; " :noweb-ref " item-str + ;; " :comments org" + ;; " :eval no-export" + ;; " :results code" + ;; " :exports both" + ;; )) + ;; (cons 'header-args:bash + ;; (concat + ;; ":noweb yes" + ;; " :noweb-ref " item-str + ;; " :comments org" + ;; " :eval no-export" + ;; " :results output code verbatis replace" + ;; " :exports both" + ;; " :wrap EXAMPLE" + ;; ;; This can help catch stderr and other issues + ;; ;; " :prologue \"exec 2>&1\n\"" + ;; ;; " :epilogue \":\n\"" + ;; ;; " :prologue exec 2>&1\n(\n" + ;; ;; " :epilogue )\n:\n" + ;; ;; If you want commands executing over tramp + ;; ;; " :dir (symbol-value 'tmpdir)" + ;; ;; " :dir (concat \"ssh:\" ssh-user \"@\" 
ssh-host \":~\"" + ;; ;; " :dir (concat \"ssh:\" ssh-user \"@\" ssh-host \":~\"" + ;; ;; If you want to feed an application via HEREDOC + ;; ;; :PROPERTIES: + ;; ;; " :prologue exec 2>&1\nbq query -n 2000 --nouse_legacy_sql </dev/null " + ;; "; echo Share the above with your friends and hit enter when done. " + ;; "; read " + ;; "; bash --login\"" + ;; ) + ;; ) + ;; (set (make-local-variable 'start-tmate-for-togetherly-client) + ;; (let ( + ;; (togetherly-socket (make-temp-file (concat "/tmp/" user-buffer "-"))) + ;; ) + ;; (concat + ;; "tmate -S " + ;; togetherly-socket + ;; " new-session -A -s " + ;; user-login-name + ;; " -n main " + ;; "\"tmate wait tmate-ready " + ;; "&& TMATE_CONNECT=\\$(" + ;; "tmate display -p '#{tmate_ssh} # " + ;; user-buffer + ;; "." + ;; togetherly-socket + ;; ".TOGETHERLY # " + ;; ;; would like this to be shorter + ;; (concat + ;; (format-time-string "%Y-%m-%d %T") + ;; (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) + ;; " # #{tmate_web} ') " + ;; "; echo \\$TMATE_CONNECT " + ;; "; (echo \\$TMATE_CONNECT | xclip -i -sel p -f | xclip -i -sel c ) 2>/dev/null " + ;; "; echo Share this url with someone both be able to togethrly the same buffer. 
" + ;; "; read " + ;; "; emacs -nw --eval '\(togetherly-client-quick-start \"" (number-to-string togetherly-port) "\")'\"" + ;; ) + ;; ) + ;; ) + ;; ;; at some point we can bring back working on remote hosts + ;; (set (make-local-variable 'start-tmate-over-ssh-command) + ;; (concat + ;; "tmate -S " + ;; socket + ;; " new-session -A -s " + ;; user-login-name + ;; " -n main " + ;; "\"tmate wait tmate-ready " + ;; "\\&\\& TMATE_CONNECT=\\$\\(" + ;; "tmate display -p '#{tmate_ssh} # " + ;; user-buffer + ;; ".target # " + ;; (concat + ;; (format-time-string "%Y-%m-%d %T") + ;; (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) + ;; " #{tmate_web} '\\) " + ;; "; echo \\$TMATE_CONNECT " + ;; "; \\(echo \\$TMATE_CONNECT \\| xclip -i -sel p -f \\| xclip -i -sel c \\) 2>/dev/null " + ;; "; echo Share the above with your friends and hit enter when done. " + ;; "; read " + ;; "; bash --login\"" + ;; ) + ;; ) + ;; ;; # eval: (set (make-local-variable 'ssh-user-host) (concat ssh-user "@" ssh-host)) + ;; ;; # eval: (set (make-local-variable 'start-tmate-over-ssh-command) (concat "tmate -S " socket " new-session -A -s " user-login-name " -n main \\\"tmate wait tmate-ready \\&\\& tmate display -p \\'#{tmate_ssh}\\' \\| xclip -i -sel p -f \\| xclip -i -sel c \\&\\& bash --login\\\"")) + ;; ;; # eval: (set (make-local-variable 'start-tmate-locally-command) (concat "tmate -S " socket " new-session -A -s " user-login-name " -n main \\\"tmate wait tmate-ready \\&\\& tmate display -p \\'#{tmate_ssh}\\' \\| xclip -i -sel p -f \\| xclip -i -sel c \\&\\& bash --login\\\"")) + ;; ;; # eval: (xclip-mode 1) + ;; ;; # eval: (gui-select-text (concat "ssh -tAX " ssh-user-host " -L " socket ":" socket " " start-tmate-over-ssh-command)) + ;; (defun togetherly-server-start-now () + ;; "Start a Togetherly server with this buffer." 
+ ;; (interactive) + ;; (cond ((null togetherly--server) + ;; (let* ((addr "127.0.0.1") + ;; (server-port togetherly-port) + ;; (server-name user-login-name) + ;; (server-proc (make-network-process + ;; :name "togetherly-server" :server t + ;; :service server-port :noquery t :host addr + ;; :sentinel 'togetherly--server-sentinel-function + ;; :filter 'togetherly--server-filter-function)) + ;; (rcolor (car togetherly-region-colors)) + ;; (pcolor (car togetherly-cursor-colors))) + ;; (setq togetherly-region-colors (cdr togetherly-region-colors) + ;; togetherly-cursor-colors (cdr togetherly-cursor-colors) + ;; togetherly--server `(,server-proc ,server-name ,rcolor . ,pcolor) + ;; togetherly--server-buffer (current-buffer) + ;; togetherly--server-clients nil + ;; togetherly--server-timer-object + ;; (run-with-timer nil togetherly-cursor-sync-rate + ;; 'togetherly--server-broadcast-cursor-positions)) + ;; (set (make-local-variable 'header-line-format) + ;; (concat " " (propertize server-name 'face `(:background ,pcolor))))) + ;; (add-hook 'before-change-functions 'togetherly--server-before-change nil t) + ;; (add-hook 'after-change-functions 'togetherly--server-after-change nil t) + ;; (add-hook 'kill-buffer-query-functions 'togetherly--server-kill-buffer-query) + ;; (populate-x-togetherly) ;; go ahead and create the tmate paste for the togetherly + ;; ) + ;; ((y-or-n-p "Togetherly server already started. Migrate to this buffer ? 
") + ;; (set (make-local-variable 'header-line-format) + ;; (buffer-local-value 'header-line-format togetherly--server-buffer)) + ;; (add-hook 'before-change-functions 'togetherly--server-before-change nil t) + ;; (add-hook 'after-change-functions 'togetherly--server-after-change nil t) + ;; (with-current-buffer togetherly--server-buffer + ;; (remove-hook 'before-change-functions 'togetherly--server-before-change t) + ;; (remove-hook 'after-change-functions 'togetherly--server-after-change t) + ;; (kill-local-variable 'header-line-format)) + ;; (setq togetherly--server-buffer (current-buffer)) + ;; (togetherly--server-broadcast `(welcome ,(togetherly--buffer-string) . ,major-mode)) + ;; ) + ;; (t + ;; (message "Togetherly: Canceled.")))) + ;; (defun populate-x-togetherly () + ;; "Populate the clipboard with the command for a together client" + ;; (interactive) + ;; (message "Setting X Clipboard to contain the start-tmate command") + ;; (xclip-mode 1) + ;; (gui-select-text start-tmate-for-togetherly-client) + ;; ) + ;; (defun runs-and-exits-zero (program &rest args) + ;; "Run PROGRAM with ARGS and return the exit code." + ;; (with-temp-buffer + ;; (if (= 0 (apply 'call-process program nil (current-buffer) nil args)) + ;; 'true + ;; )) + ;; ) + ;; (defun xclip-working () + ;; "Quick Check to see if X is working." 
+ ;; (if (getenv "DISPLAY") + ;; ;; this xset test is a bit flakey + ;; ;; (if (runs-and-exits-zero "xset" "q") + ;; ;; Using xclip to set an invalid selection is as lightly intrusive + ;; ;; check I could come up with, and not overwriting anything + ;; ;; however it seems to hang + ;; ;; (if (runs-and-exits-zero "xclip" "-selection" "unused") + ;; ;; 'true) + ;; 'true + ;; ;; ) + ;; ) + ;; ) + ;; (defun populate-x-clipboard () + ;; "Populate the X clipboard with the start-tmate-command" + ;; (message "Setting X Clipboard to contain the start-tmate command") + ;; (xclip-mode 1) + ;; (gui-select-text start-tmate-command) + ;; (xclip-mode 0) + ;; (with-current-buffer (get-buffer-create "start-tmate-command") + ;; (insert-for-yank "The following has been populated to your local X clipboard:\n") + ;; ) + ;; ) + ;; ;; For testing / setting DISPLAY to something else + ;; ;; (getenv "DISPLAY") + ;; ;; (setenv "DISPLAY" ":0") + ;; ;; As we start on other OSes, we'll need to copy this differently + ;; (if (xclip-working) + ;; (populate-x-clipboard) + ;; (with-current-buffer (get-buffer-create "start-tmate-command" ) + ;; (insert-for-yank "You will need to copy this manually:\n\n" ) + ;; ) + ;; ) + ;; ;; needs to be global, so it's availabel to the other buffer + ;; (setq tmate-command start-tmate-command) + ;; (with-current-buffer (get-buffer-create "start-tmate-command") + ;; (insert-for-yank + ;; (concat "\nOpen another terminal on the same host and paste:\n\n" tmate-command) + ;; )) + ;; (switch-to-buffer "start-tmate-command") + ;; (y-or-n-p "Have you Pasted?") + ;; ;; https://www.wisdomandwonder.com/article/10630/how-fast-can-you-tangle-in-org-mode + ;; (setq help/default-gc-cons-threshold gc-cons-threshold) + ;; (defun help/set-gc-cons-threshold (&optional multiplier notify) + ;; "Set `gc-cons-threshold' either to its default value or a + ;; `multiplier' thereof." 
+ ;; (let* ((new-multiplier (or multiplier 1)) + ;; (new-threshold (* help/default-gc-cons-threshold + ;; new-multiplier))) + ;; (setq gc-cons-threshold new-threshold) + ;; (when notify (message "Setting `gc-cons-threshold' to %s" + ;; new-threshold)))) + ;; (defun help/double-gc-cons-threshold () "Double `gc-cons-threshold'." (help/set-gc-cons-threshold 2)) + ;; (add-hook 'org-babel-pre-tangle-hook #'help/double-gc-cons-threshold) + ;; (add-hook 'org-babel-post-tangle-hook #'help/set-gc-cons-threshold) + ;; ;; info:org#Conflicts for org 9 and very recent yas + ;; (defun yas/org-very-safe-expand () + ;; (let ((yas/fallback-behavior 'return-nil)) (yas/expand))) + + ;; (yas/expand) + ;; (make-variable-buffer-local 'yas/trigger-key) + ;; (setq yas/trigger-key [tab]) + ;; (add-to-list 'org-tab-first-hook 'yas/org-very-safe-expand) + ;; (define-key yas/keymap [tab] 'yas/next-field) + ;; ;; (gui-select-text (concat "rm -fi " socket "; ssh -tAX " ssh-user "@" ssh-host " -L " socket ":" socket " " start-tmate-over-ssh-command)) + ;; ;; (edebug-trace "TRACING socket:%S" socket) + ;; ;; (edebug-trace "TRACING org-babel-header-args:tmate %S" org-babel-header-args:emacs-lisp) + ;; ;; we could try and create a buffer / clear it on the fly + ;; ;; ssh later? + ;; ;; (with-current-buffer (get-buffer-create "start-tmate-command") + ;; ;; (insert-for-yank + ;; ;; (concat "\nOpen another terminal on the same host and paste:\n\n" tmate-command) + ;; ;; )) + ;; ;; FIXME! How do we find out what our local filname is? + ;; ;; This was designed for dir-locals... can we reach in? 
+ ;; ;; (switch-to-buffer (get-buffer buffer-file-name)) + ;; ;; (spacemacs/toggle-maximize-buffer) + ;; ) + ;; ) + ;; ) + ;; ) +;; Add Later +;; https://www.emacswiki.org/emacs/AutomaticFileHeaders #templates / updates etc +;; ^^ based on https://www.emacswiki.org/emacs/download/header2.el +;; ;; https://stackoverflow.com/questions/13228001/org-mode-nested-properties +;; https://www.reddit.com/r/emacs/comments/4154bu/how_to_get_orgmode_to_recognize_markdownstyle/ +;; ^^ https://www.reddit.com/r/emacs/comments/4154bu/how_to_get_orgmode_to_recognize_markdownstyle/cz0bb45/ +;;http://endlessparentheses.com/markdown-style-link-ids-in-org-mode.html diff --git a/docs/audit-policy.yaml b/docs.old/audit-policy.yaml similarity index 100% rename from docs/audit-policy.yaml rename to docs.old/audit-policy.yaml diff --git a/docs/installing-the-packet-cli.org b/docs.old/installing-the-packet-cli.org similarity index 100% rename from docs/installing-the-packet-cli.org rename to docs.old/installing-the-packet-cli.org diff --git a/docs/setup-kubetest-kind-on-packet.org b/docs.old/setup-kubetest-kind-on-packet.org similarity index 100% rename from docs/setup-kubetest-kind-on-packet.org rename to docs.old/setup-kubetest-kind-on-packet.org diff --git a/docs/start_session.sh b/docs.old/start_session.sh similarity index 100% rename from docs/start_session.sh rename to docs.old/start_session.sh diff --git a/docs/.dir-locals.el b/docs/.dir-locals.el deleted file mode 100644 index 0d51969..0000000 --- a/docs/.dir-locals.el +++ /dev/null @@ -1,361 +0,0 @@ -;;; Directory Local Variables -;;; For more information see (info "(emacs) Directory Variables") -( - (org-mode - (org-babel-tmate-session-prefix . "") - (org-babel-tmate-default-window-name . "main") - (org-confirm-babel-evaluate . nil) - (org-use-property-inheritance . t) - (org-file-dir . (file-name-directory buffer-file-name)) - (eval - . 
- (progn - ;; (let ((socket-arg (concat ":socket " "FLOOPIE" )))) - ;; (set (make-local-variable 'tmpdir) - ;; (make-temp-file (concat "/dev/shm/" user-buffer "-") t)) - (set (make-local-variable 'ssh-user) - "pair") - ;; user-login-name) - ;; might be nice to set this as a global property in the org file - (set (make-local-variable 'ssh-host) - "ii.cncf.ci") - (set (make-local-variable 'ssh-user-host) - (concat ssh-user "@" ssh-host)) - (set (make-local-variable 'time-stamp-zone) - "Pacific/Auckland") - (set (make-local-variable 'time-stamp-pattern) - ;; https://www.emacswiki.org/emacs/TimeStamp - "10/#+UPDATED: needs time-local formatted regexp") - (set (make-local-variable 'user-buffer) - (concat user-login-name "." (file-name-base load-file-name))) - (set (make-local-variable 'socket) - (concat "/tmp/" user-buffer ".target.iisocket")) - (set (make-local-variable 'socket-param) - (concat ":sockets " socket)) - (set (make-local-variable 'item-str) - "(nth 4 (org-heading-components))") - (set (make-local-variable 'togetherly-port) - (+ (random 60000) 1024)) - (set (make-local-variable 'org-file-properties) - (list - (cons 'header-args:tmate - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval no-export" - " :results silent " - " :session (concat user-login-name \":main\" )" - ;; " :session (concat user-login-name \":\" " "main" ")" - ;; " :session (concat user-login-name \":\" " item-str ")" - " :socket " socket - " :window " user-login-name - " :terminal sakura" - " :exports code" - ;; If you want each tmate command to run from a particular directory - ;; " :prologue (concat \"cd \" ssh-dir \"\n\")" - ;; " :prologue (concat "cd " org-file-dir "\n") )) - )) - (cons 'header-args:emacs-lisp - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval no-export" - " :results code" - " :exports both" - )) - (cons 'header-args:elisp - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval no-export" - 
" :results code" - " :exports both" - )) - (cons 'header-args:bash - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval no-export" - " :results output code verbatis replace" - " :exports both" - " :wrap EXAMPLE" - ;; This can help catch stderr and other issues - ;; " :prologue \"exec 2>&1\n\"" - ;; " :epilogue \":\n\"" - ;; " :prologue exec 2>&1\n(\n" - ;; " :epilogue )\n:\n" - ;; If you want commands executing over tramp - ;; " :dir (symbol-value 'tmpdir)" - ;; " :dir (concat \"ssh:\" ssh-user \"@\" ssh-host \":~\"" - ;; " :dir (concat \"ssh:\" ssh-user \"@\" ssh-host \":~\"" - ;; If you want to feed an application via HEREDOC - ;; :PROPERTIES: - ;; " :prologue exec 2>&1\nbq query -n 2000 --nouse_legacy_sql </dev/null " - "; echo Share the above with your friends and hit enter when done. " - "; read " - "; bash --login\"" - ) - ) - (set (make-local-variable 'start-tmate-for-togetherly-client) - (let ( - (togetherly-socket (make-temp-file (concat "/tmp/" user-buffer "-"))) - ) - (concat - "tmate -S " - togetherly-socket - " new-session -A -s " - user-login-name - " -n main " - "\"tmate wait tmate-ready " - "&& TMATE_CONNECT=\\$(" - "tmate display -p '#{tmate_ssh} # " - user-buffer - "." - togetherly-socket - ".TOGETHERLY # " - ;; would like this to be shorter - (concat - (format-time-string "%Y-%m-%d %T") - (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) - " # #{tmate_web} ') " - "; echo \\$TMATE_CONNECT " - "; (echo \\$TMATE_CONNECT | xclip -i -sel p -f | xclip -i -sel c ) 2>/dev/null " - "; echo Share this url with someone both be able to togethrly the same buffer. 
" - "; read " - "; emacs -nw --eval '\(togetherly-client-quick-start \"" (number-to-string togetherly-port) "\")'\"" - ) - ) - ) - ;; at some point we can bring back working on remote hosts - (set (make-local-variable 'start-tmate-over-ssh-command) - (concat - "tmate -S " - socket - " new-session -A -s " - user-login-name - " -n main " - "\"tmate wait tmate-ready " - "\\&\\& TMATE_CONNECT=\\$\\(" - "tmate display -p '#{tmate_ssh} # " - user-buffer - ".target # " - (concat - (format-time-string "%Y-%m-%d %T") - (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) - " #{tmate_web} '\\) " - "; echo \\$TMATE_CONNECT " - "; \\(echo \\$TMATE_CONNECT \\| xclip -i -sel p -f \\| xclip -i -sel c \\) 2>/dev/null " - "; echo Share the above with your friends and hit enter when done. " - "; read " - "; bash --login\"" - ) - ) - ;; # eval: (set (make-local-variable 'ssh-user-host) (concat ssh-user "@" ssh-host)) - ;; # eval: (set (make-local-variable 'start-tmate-over-ssh-command) (concat "tmate -S " socket " new-session -A -s " user-login-name " -n main \\\"tmate wait tmate-ready \\&\\& tmate display -p \\'#{tmate_ssh}\\' \\| xclip -i -sel p -f \\| xclip -i -sel c \\&\\& bash --login\\\"")) - ;; # eval: (set (make-local-variable 'start-tmate-locally-command) (concat "tmate -S " socket " new-session -A -s " user-login-name " -n main \\\"tmate wait tmate-ready \\&\\& tmate display -p \\'#{tmate_ssh}\\' \\| xclip -i -sel p -f \\| xclip -i -sel c \\&\\& bash --login\\\"")) - ;; # eval: (xclip-mode 1) - ;; # eval: (gui-select-text (concat "ssh -tAX " ssh-user-host " -L " socket ":" socket " " start-tmate-over-ssh-command)) - (defun togetherly-server-start-now () - "Start a Togetherly server with this buffer." 
- (interactive) - (cond ((null togetherly--server) - (let* ((addr "127.0.0.1") - (server-port togetherly-port) - (server-name user-login-name) - (server-proc (make-network-process - :name "togetherly-server" :server t - :service server-port :noquery t :host addr - :sentinel 'togetherly--server-sentinel-function - :filter 'togetherly--server-filter-function)) - (rcolor (car togetherly-region-colors)) - (pcolor (car togetherly-cursor-colors))) - (setq togetherly-region-colors (cdr togetherly-region-colors) - togetherly-cursor-colors (cdr togetherly-cursor-colors) - togetherly--server `(,server-proc ,server-name ,rcolor . ,pcolor) - togetherly--server-buffer (current-buffer) - togetherly--server-clients nil - togetherly--server-timer-object - (run-with-timer nil togetherly-cursor-sync-rate - 'togetherly--server-broadcast-cursor-positions)) - (set (make-local-variable 'header-line-format) - (concat " " (propertize server-name 'face `(:background ,pcolor))))) - (add-hook 'before-change-functions 'togetherly--server-before-change nil t) - (add-hook 'after-change-functions 'togetherly--server-after-change nil t) - (add-hook 'kill-buffer-query-functions 'togetherly--server-kill-buffer-query) - (populate-x-togetherly) ;; go ahead and create the tmate paste for the togetherly - ) - ((y-or-n-p "Togetherly server already started. Migrate to this buffer ? 
") - (set (make-local-variable 'header-line-format) - (buffer-local-value 'header-line-format togetherly--server-buffer)) - (add-hook 'before-change-functions 'togetherly--server-before-change nil t) - (add-hook 'after-change-functions 'togetherly--server-after-change nil t) - (with-current-buffer togetherly--server-buffer - (remove-hook 'before-change-functions 'togetherly--server-before-change t) - (remove-hook 'after-change-functions 'togetherly--server-after-change t) - (kill-local-variable 'header-line-format)) - (setq togetherly--server-buffer (current-buffer)) - (togetherly--server-broadcast `(welcome ,(togetherly--buffer-string) . ,major-mode)) - ) - (t - (message "Togetherly: Canceled.")))) - (defun populate-x-togetherly () - "Populate the clipboard with the command for a together client" - (interactive) - (message "Setting X Clipboard to contain the start-tmate command") - (xclip-mode 1) - (gui-select-text start-tmate-for-togetherly-client) - ) - (defun runs-and-exits-zero (program &rest args) - "Run PROGRAM with ARGS and return the exit code." - (with-temp-buffer - (if (= 0 (apply 'call-process program nil (current-buffer) nil args)) - 'true - )) - ) - (defun xclip-working () - "Quick Check to see if X is working." 
- (if (getenv "DISPLAY") - ;; this xset test is a bit flakey - ;; (if (runs-and-exits-zero "xset" "q") - ;; Using xclip to set an invalid selection is as lightly intrusive - ;; check I could come up with, and not overwriting anything - ;; however it seems to hang - ;; (if (runs-and-exits-zero "xclip" "-selection" "unused") - ;; 'true) - 'true - ;; ) - ) - ) - (defun populate-x-clipboard () - "Populate the X clipboard with the start-tmate-command" - (message "Setting X Clipboard to contain the start-tmate command") - (xclip-mode 1) - (gui-select-text start-tmate-command) - (xclip-mode 0) - (with-current-buffer (get-buffer-create "start-tmate-command") - (insert-for-yank "The following has been populated to your local X clipboard:\n") - ) - ) - ;; For testing / setting DISPLAY to something else - ;; (getenv "DISPLAY") - ;; (setenv "DISPLAY" ":0") - ;; As we start on other OSes, we'll need to copy this differently - (if (xclip-working) - (populate-x-clipboard) - (with-current-buffer (get-buffer-create "start-tmate-command" ) - (insert-for-yank "You will need to copy this manually:\n\n" ) - ) - ) - ;; needs to be global, so it's availabel to the other buffer - (setq tmate-command start-tmate-command) - (with-current-buffer (get-buffer-create "start-tmate-command") - (insert-for-yank - (concat "\nOpen another terminal on the same host and paste:\n\n" tmate-command) - )) - (switch-to-buffer "start-tmate-command") - (y-or-n-p "Have you Pasted?") - ;; https://www.wisdomandwonder.com/article/10630/how-fast-can-you-tangle-in-org-mode - (setq help/default-gc-cons-threshold gc-cons-threshold) - (defun help/set-gc-cons-threshold (&optional multiplier notify) - "Set `gc-cons-threshold' either to its default value or a - `multiplier' thereof." 
- (let* ((new-multiplier (or multiplier 1)) - (new-threshold (* help/default-gc-cons-threshold - new-multiplier))) - (setq gc-cons-threshold new-threshold) - (when notify (message "Setting `gc-cons-threshold' to %s" - new-threshold)))) - (defun help/double-gc-cons-threshold () "Double `gc-cons-threshold'." (help/set-gc-cons-threshold 2)) - (add-hook 'org-babel-pre-tangle-hook #'help/double-gc-cons-threshold) - (add-hook 'org-babel-post-tangle-hook #'help/set-gc-cons-threshold) - ;; info:org#Conflicts for org 9 and very recent yas - (defun yas/org-very-safe-expand () - (let ((yas/fallback-behavior 'return-nil)) (yas/expand))) - - (yas/expand) - (make-variable-buffer-local 'yas/trigger-key) - (setq yas/trigger-key [tab]) - (add-to-list 'org-tab-first-hook 'yas/org-very-safe-expand) - (define-key yas/keymap [tab] 'yas/next-field) - ;; (gui-select-text (concat "rm -fi " socket "; ssh -tAX " ssh-user "@" ssh-host " -L " socket ":" socket " " start-tmate-over-ssh-command)) - ;; (edebug-trace "TRACING socket:%S" socket) - ;; (edebug-trace "TRACING org-babel-header-args:tmate %S" org-babel-header-args:emacs-lisp) - ;; we could try and create a buffer / clear it on the fly - ;; ssh later? - ;; (with-current-buffer (get-buffer-create "start-tmate-command") - ;; (insert-for-yank - ;; (concat "\nOpen another terminal on the same host and paste:\n\n" tmate-command) - ;; )) - ;; FIXME! How do we find out what our local filname is? - ;; This was designed for dir-locals... can we reach in? 
- ;; (switch-to-buffer (get-buffer buffer-file-name)) - ;; (spacemacs/toggle-maximize-buffer) - ) - ) - ) - ) -;; Add Later -;; https://www.emacswiki.org/emacs/AutomaticFileHeaders #templates / updates etc -;; ^^ based on https://www.emacswiki.org/emacs/download/header2.el -;; ;; https://stackoverflow.com/questions/13228001/org-mode-nested-properties -;; https://www.reddit.com/r/emacs/comments/4154bu/how_to_get_orgmode_to_recognize_markdownstyle/ -;; ^^ https://www.reddit.com/r/emacs/comments/4154bu/how_to_get_orgmode_to_recognize_markdownstyle/cz0bb45/ -;;http://endlessparentheses.com/markdown-style-link-ids-in-org-mode.html diff --git a/docs/CNAME b/docs/CNAME new file mode 100644 index 0000000..9e396bb --- /dev/null +++ b/docs/CNAME @@ -0,0 +1 @@ +blog.ii.coop \ No newline at end of file diff --git a/docs/deploying-talos-to-equinix.html b/docs/deploying-talos-to-equinix.html new file mode 100644 index 0000000..62b1e5f --- /dev/null +++ b/docs/deploying-talos-to-equinix.html @@ -0,0 +1,405 @@ +

Introduction

In this guide we will launch a highly-available three Node Kubernetes cluster on Equinix Metal using Talos as the Node OS, as well as bootstrap, and controlPlane provider for Cluster-API.

Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters.

Talos is a modern OS designed to be secure, immutable, and minimal.

A globally-available bare metal “as-a-service” that can be deployed and interconnected in minutes.

The folks over at Equinix Metal have a wonderful heart for supporting Open Source communities.

  • Why is this important? :: In general: Orchestrating a container based OS such as Talos (Flatcar, Fedora CoreOS, or RancherOS) shifts focus from the Nodes to the workloads. In terms of Talos: Currently the documentation for running an OS such as Talos in Equinix Metal for Kubernetes with Cluster-API is not so well documented and therefore inaccessible. It's important to fill in the gaps of knowledge.

Dependencies

What you'll need for this guide:

  • talosctl

  • kubectl

  • packet-cli

  • the ID and API token of existing Equinix Metal project

  • an existing Kubernetes cluster with a public IP (such as kind, minikube, or a cluster already on Equinix Metal)

Preliminary steps

In order to talk to Equinix Metal, we'll export environment variables to configure resources and talk via packet-cli.

Set the correct project to create and manage resources in:

  read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID
+

The API key for your account or project:

  read -p 'PACKET_API_KEY: ' PACKET_API_KEY
+

Export the variables to be accessible from packet-cli and clusterctl later on:

  export PACKET_PROJECT_ID PACKET_API_KEY PACKET_TOKEN=$PACKET_API_KEY
+

In the existing cluster, a public LoadBalancer IP will be needed. I have already installed nginx-ingress in this cluster, which has got a Service with the cluster's elastic IP. +We'll need this IP address later for use in booting the servers. +If you have set up your existing cluster differently, it'll just need to be an IP that we can use.

  export LOAD_BALANCER_IP="$(kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+

Setting up Cluster-API

Install Talos providers for Cluster-API bootstrap and controlplane in your existing cluster:

  clusterctl init -b talos -c talos -i packet
+

This will install Talos's bootstrap and controlPlane controllers as well as the Packet / Equinix Metal infrastructure provider.

Important note:

  • the bootstrap-talos controller in the cabpt-system namespace must be running a version greater than v0.2.0-alpha.8. The version can be displayed with clusterctl upgrade plan when it's installed.

Setting up Matchbox

Currently, since Equinix Metal have not yet added support for Talos, it is necessary to install Matchbox to boot the servers (There is an issue in progress and feedback for adding support).

  • What is Matchbox? ::

Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters.

Here is the manifest for a basic matchbox installation:

  apiVersion: apps/v1
+  kind: Deployment
+  metadata:
+    name: matchbox
+  spec:
+    replicas: 1
+    strategy:
+      rollingUpdate:
+        maxUnavailable: 1
+    selector:
+      matchLabels:
+        name: matchbox
+    template:
+      metadata:
+        labels:
+          name: matchbox
+      spec:
+        containers:
+          - name: matchbox
+            image: quay.io/poseidon/matchbox:v0.9.0
+            env:
+              - name: MATCHBOX_ADDRESS
+                value: "0.0.0.0:8080"
+              - name: MATCHBOX_LOG_LEVEL
+                value: "debug"
+            ports:
+              - name: http
+                containerPort: 8080
+            livenessProbe:
+              initialDelaySeconds: 5
+              httpGet:
+                path: /
+                port: 8080
+            resources:
+              requests:
+                cpu: 30m
+                memory: 20Mi
+              limits:
+                cpu: 50m
+                memory: 50Mi
+            volumeMounts:
+              - name: data
+                mountPath: /var/lib/matchbox
+              - name: assets
+                mountPath: /var/lib/matchbox/assets
+        volumes:
+          - name: data
+            hostPath:
+              path: /var/local/matchbox/data
+          - name: assets
+            hostPath:
+              path: /var/local/matchbox/assets
+  ---
+  apiVersion: v1
+  kind: Service
+  metadata:
+    name: matchbox
+    annotations:
+      metallb.universe.tf/allow-shared-ip: nginx-ingress
+  spec:
+    type: LoadBalancer
+    selector:
+      name: matchbox
+    ports:
+      - name: http
+        protocol: TCP
+        port: 8080
+        targetPort: 8080
+

Save it as matchbox.yaml

The manifests above were inspired by the manifests in the matchbox repo. +For production it might be wise to use:

  • an Ingress with full TLS

  • a ReadWriteMany storage provider instead hostPath for scaling

With the manifests ready to go, we'll install Matchbox into the matchbox namespace on the existing cluster with the following commands:

  kubectl create ns matchbox
+  kubectl -n matchbox apply -f ./matchbox.yaml
+

You may need to patch the Service.spec.externalIPs to have an IP to access it from if one is not populated:

  kubectl -n matchbox patch \
+    service matchbox \
+    -p "{\"spec\":{\"externalIPs\":[\"$LOAD_BALANCER_IP\"]}}"
+

Once the pod is live, We'll need to create a directory structure for storing Talos boot assets:

  kubectl -n matchbox exec -it \
+    deployment/matchbox -- \
+    mkdir -p /var/lib/matchbox/{profiles,groups} /var/lib/matchbox/assets/talos
+

Inside the Matchbox container, we'll download the Talos boot assets for Talos version 0.8.1 into the assets folder:

  kubectl -n matchbox exec -it \
+    deployment/matchbox -- \
+    wget -P /var/lib/matchbox/assets/talos \
+    https://github.com/talos-systems/talos/releases/download/v0.8.1/initramfs-amd64.xz \
+    https://github.com/talos-systems/talos/releases/download/v0.8.1/vmlinuz-amd64
+

Now that the assets have been downloaded, run a checksum against them to verify:

  kubectl -n matchbox exec -it \
+    deployment/matchbox -- \
+    sh -c "cd /var/lib/matchbox/assets/talos && \
+      wget -O- https://github.com/talos-systems/talos/releases/download/v0.8.1/sha512sum.txt 2> /dev/null \
+      | sed 's,_out/,,g' \
+      | grep 'initramfs-amd64.xz\|vmlinuz-amd64' \
+      | sha512sum -c -"
+

Since there's only one Pod in the Matchbox deployment, we'll export its name to copy files into it:

  export MATCHBOX_POD_NAME=$(kubectl -n matchbox get pods -l name=matchbox -o=jsonpath='{.items[0].metadata.name}')
+

Profiles in Matchbox are JSON configurations for how the servers should boot, where from, and their kernel args. Save this file as profile-default-amd64.json

  {
+    "id": "default-amd64",
+    "name": "default-amd64",
+    "boot": {
+      "kernel": "/assets/talos/vmlinuz-amd64",
+      "initrd": [
+        "/assets/talos/initramfs-amd64.xz"
+      ],
+      "args": [
+        "initrd=initramfs-amd64.xz",
+        "init_on_alloc=1",
+        "init_on_free=1",
+        "slub_debug=P",
+        "pti=on",
+        "random.trust_cpu=on",
+        "console=tty0",
+        "console=ttyS1,115200n8",
+        "slab_nomerge",
+        "printk.devkmsg=on",
+        "talos.platform=packet",
+        "talos.config=none"
+      ]
+    }
+  }
+

Groups in Matchbox are a way of letting servers pick up profiles based on selectors. Save this file as group-default-amd64.json

  {
+    "id": "default-amd64",
+    "name": "default-amd64",
+    "profile": "default-amd64",
+    "selector": {
+      "arch": "amd64"
+    }
+  }
+

We'll copy the profile and group into their respective folders:

  kubectl -n matchbox \
+    cp ./profile-default-amd64.json \
+    $MATCHBOX_POD_NAME:/var/lib/matchbox/profiles/default-amd64.json
+  kubectl -n matchbox \
+    cp ./group-default-amd64.json \
+    $MATCHBOX_POD_NAME:/var/lib/matchbox/groups/default-amd64.json
+

List the files to validate that they were written correctly:

  kubectl -n matchbox exec -it \
+    deployment/matchbox -- \
+    sh -c 'ls -alh /var/lib/matchbox/*/'
+

Testing Matchbox

Using curl, we can verify Matchbox's running state:

  curl http://$LOAD_BALANCER_IP:8080
+

To test matchbox, we'll create an invalid userdata configuration for Talos, saving as userdata.txt:

#!talos
+

Feel free to use a valid one.

Now let's talk to Equinix Metal to create a server pointing to the Matchbox server:

   packet-cli device create \
+    --hostname talos-pxe-boot-test-1 \
+    --plan c1.small.x86 \
+    --facility sjc1 \
+    --operating-system custom_ipxe \
+    --project-id "$PACKET_PROJECT_ID" \
+    --ipxe-script-url "http://$LOAD_BALANCER_IP:8080/ipxe?arch=amd64" \
+    --userdata-file=./userdata.txt
+

In the meanwhile, we can watch the logs to see how things are:

  kubectl -n matchbox logs deployment/matchbox -f --tail=100
+

Looking at the logs, there should be some get requests of resources that will be used to boot the OS.

Notes:

  • fun fact: you can run Matchbox on Android using Termux.

The cluster

Preparing the cluster

Here we will declare the template that we will shortly generate our usable cluster from:

  kind: TalosControlPlane
+  apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+  metadata:
+    name: "${CLUSTER_NAME}-control-plane"
+  spec:
+    version: ${KUBERNETES_VERSION}
+    replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+    infrastructureTemplate:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+      kind: PacketMachineTemplate
+      name: "${CLUSTER_NAME}-control-plane"
+    controlPlaneConfig:
+      init:
+        generateType: init
+        configPatches:
+          - op: replace
+            path: /machine/install
+            value:
+              disk: /dev/sda
+              image: ghcr.io/talos-systems/installer:v0.8.1
+              bootloader: true
+              wipe: false
+              force: false
+          - op: add
+            path: /machine/kubelet/extraArgs
+            value:
+              cloud-provider: external
+          - op: add
+            path: /cluster/apiServer/extraArgs
+            value:
+              cloud-provider: external
+          - op: add
+            path: /cluster/controllerManager/extraArgs
+            value:
+              cloud-provider: external
+          - op: add
+            path: /cluster/extraManifests
+            value:
+            - https://github.com/packethost/packet-ccm/releases/download/v1.1.0/deployment.yaml
+          - op: add
+            path: /cluster/allowSchedulingOnMasters
+            value: true
+      controlplane:
+        generateType: controlplane
+        configPatches:
+          - op: replace
+            path: /machine/install
+            value:
+              disk: /dev/sda
+              image: ghcr.io/talos-systems/installer:v0.8.1
+              bootloader: true
+              wipe: false
+              force: false
+          - op: add
+            path: /machine/kubelet/extraArgs
+            value:
+              cloud-provider: external
+          - op: add
+            path: /cluster/apiServer/extraArgs
+            value:
+              cloud-provider: external
+          - op: add
+            path: /cluster/controllerManager/extraArgs
+            value:
+              cloud-provider: external
+          - op: add
+            path: /cluster/allowSchedulingOnMasters
+            value: true
+  ---
+  apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+  kind: PacketMachineTemplate
+  metadata:
+    name: "${CLUSTER_NAME}-control-plane"
+  spec:
+    template:
+      spec:
+        OS: custom_ipxe
+        ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64"
+        billingCycle: hourly
+        machineType: "${CONTROLPLANE_NODE_TYPE}"
+        sshKeys:
+          - "${SSH_KEY}"
+        tags: []
+  ---
+  apiVersion: cluster.x-k8s.io/v1alpha3
+  kind: Cluster
+  metadata:
+    name: "${CLUSTER_NAME}"
+  spec:
+    clusterNetwork:
+      pods:
+        cidrBlocks:
+          - ${POD_CIDR:=192.168.0.0/16}
+      services:
+        cidrBlocks:
+          - ${SERVICE_CIDR:=172.26.0.0/16}
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+      kind: PacketCluster
+      name: "${CLUSTER_NAME}"
+    controlPlaneRef:
+      apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
+      kind: TalosControlPlane
+      name: "${CLUSTER_NAME}-control-plane"
+  ---
+  apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+  kind: PacketCluster
+  metadata:
+    name: "${CLUSTER_NAME}"
+  spec:
+    projectID: "${PACKET_PROJECT_ID}"
+    facility: "${FACILITY}"
+  ---
+  apiVersion: cluster.x-k8s.io/v1alpha3
+  kind: MachineDeployment
+  metadata:
+    name: ${CLUSTER_NAME}-worker-a
+    labels:
+      cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
+      pool: worker-a
+  spec:
+    replicas: ${WORKER_MACHINE_COUNT}
+    clusterName: ${CLUSTER_NAME}
+    selector:
+      matchLabels:
+        cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
+        pool: worker-a
+    template:
+      metadata:
+        labels:
+          cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
+          pool: worker-a
+      spec:
+        version: ${KUBERNETES_VERSION}
+        clusterName: ${CLUSTER_NAME}
+        bootstrap:
+          configRef:
+            name: ${CLUSTER_NAME}-worker-a
+            apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+            kind: TalosConfigTemplate
+        infrastructureRef:
+          name: ${CLUSTER_NAME}-worker-a
+          apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+          kind: PacketMachineTemplate
+  ---
+  apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+  kind: PacketMachineTemplate
+  metadata:
+    name: ${CLUSTER_NAME}-worker-a
+  spec:
+    template:
+      spec:
+        OS: custom_ipxe
+        ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64"
+        billingCycle: hourly
+        machineType: "${WORKER_NODE_TYPE}"
+        sshKeys:
+          - "${SSH_KEY}"
+        tags: []
+  ---
+  apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
+  kind: TalosConfigTemplate
+  metadata:
+    name: ${CLUSTER_NAME}-worker-a
+    labels:
+      cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
+  spec:
+    template:
+      spec:
+        generateType: init
+

Inside of TalosControlPlane.spec.controlPlaneConfig.init, I'm very much liking the use of generateType: init paired with configPatches. This enables:

  • configuration to be generated;

  • management of certificates out of the cluster operator's hands;

  • another level of standardisation; and

  • overrides to be added where needed

Notes:

  • the ClusterAPI template above uses Packet-Cloud-Controller manager version 1.1.0

Templating your configuration

Set environment variables for configuration:

  <>
+  export FACILITY=sjc1
+  export KUBERNETES_VERSION=v1.20.2
+  export POD_CIDR=10.244.0.0/16
+  export SERVICE_CIDR=10.96.0.0/12
+  export CONTROLPLANE_NODE_TYPE=c1.small.x86
+  export CONTROL_PLANE_MACHINE_COUNT=3
+  export WORKER_NODE_TYPE=c1.small.x86
+  export WORKER_MACHINE_COUNT=0
+  export SSH_KEY=""
+  export IPXE_SERVER_IP=$LOAD_BALANCER_IP
+

In the variables above, we will create a cluster which has three small controlPlane nodes to run workloads.

Render the manifests

Render your cluster configuration from the template:

  clusterctl config cluster "$CLUSTER_NAME" \
+    --from ./talos-packet-cluster-template.yaml \
+    -n "$CLUSTER_NAME" > "$CLUSTER_NAME"-cluster-capi.yaml
+

Creating the cluster

With the template for the cluster rendered to how we wish to deploy it, it's now time to apply it:

  kubectl create ns "$CLUSTER_NAME"
+  kubectl -n "$CLUSTER_NAME" apply -f ./"$CLUSTER_NAME"-cluster-capi.yaml
+

The cluster will now be brought up, we can see the progress by taking a look at the resources:

  kubectl -n "$CLUSTER_NAME" get machines,clusters,packetmachines,packetclusters
+

Note: As expected, the cluster may take some time to appear and be accessible.

Not long after applying, a KubeConfig is available. Fetch the KubeConfig from the existing cluster with:

  kubectl -n "$CLUSTER_NAME" get secrets \
+    "$CLUSTER_NAME"-kubeconfig -o=jsonpath='{.data.value}' \
+    | base64 -d > $HOME/.kube/"$CLUSTER_NAME"
+

Using the KubeConfig from the new cluster, check out the status of it:

  kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" cluster-info
+

Once the APIServer is reachable, create configuration for how the Packet-Cloud-Controller-Manager should talk to Equinix-Metal:

  kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" -n kube-system \
+    create secret generic packet-cloud-config \
+    --from-literal=cloud-sa.json="{\"apiKey\": \"${PACKET_API_KEY}\",\"projectID\": \"${PACKET_PROJECT_ID}\"}"
+

Since we're able to talk to the APIServer, we can check how all Pods are doing:

  <>
+  kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME"\
+    -n kube-system get pods
+

Listing Pods shows that everything is live and in a good state:

NAMESPACE     NAME                                                     READY   STATUS    RESTARTS   AGE
+kube-system   coredns-5b55f9f688-fb2cb                                 1/1     Running   0          25m
+kube-system   coredns-5b55f9f688-qsvg5                                 1/1     Running   0          25m
+kube-system   kube-apiserver-665px                                     1/1     Running   0          19m
+kube-system   kube-apiserver-mz68q                                     1/1     Running   0          19m
+kube-system   kube-apiserver-qfklt                                     1/1     Running   2          19m
+kube-system   kube-controller-manager-6grxd                            1/1     Running   0          19m
+kube-system   kube-controller-manager-cf76h                            1/1     Running   0          19m
+kube-system   kube-controller-manager-dsmgf                            1/1     Running   0          19m
+kube-system   kube-flannel-brdxw                                       1/1     Running   0          24m
+kube-system   kube-flannel-dm85d                                       1/1     Running   0          24m
+kube-system   kube-flannel-sg6k9                                       1/1     Running   0          24m
+kube-system   kube-proxy-flx59                                         1/1     Running   0          24m
+kube-system   kube-proxy-gbn4l                                         1/1     Running   0          24m
+kube-system   kube-proxy-ns84v                                         1/1     Running   0          24m
+kube-system   kube-scheduler-4qhjw                                     1/1     Running   0          19m
+kube-system   kube-scheduler-kbm5z                                     1/1     Running   0          19m
+kube-system   kube-scheduler-klsmp                                     1/1     Running   0          19m
+kube-system   packet-cloud-controller-manager-77cd8c9c7c-cdzfv         1/1     Running   0          20m
+kube-system   pod-checkpointer-4szh6                                   1/1     Running   0          19m
+kube-system   pod-checkpointer-4szh6-talos-metal-control-plane-j29lb   1/1     Running   0          19m
+kube-system   pod-checkpointer-k7w8h                                   1/1     Running   0          19m
+kube-system   pod-checkpointer-k7w8h-talos-metal-control-plane-lk9f2   1/1     Running   0          19m
+kube-system   pod-checkpointer-m5wrh                                   1/1     Running   0          19m
+kube-system   pod-checkpointer-m5wrh-talos-metal-control-plane-h9v4j   1/1     Running   0          19m
+

With the cluster live, it's now ready for workloads to be deployed!

Talos Configuration

In order to manage Talos Nodes outside of Kubernetes, we need to create and set up configuration to use.

Create the directory for the config:

  mkdir -p $HOME/.talos
+

Discover the IP for the first controlPlane:

  export TALOS_ENDPOINT=$(kubectl -n "$CLUSTER_NAME" \
+    get machines \
+    $(kubectl -n "$CLUSTER_NAME" \
+      get machines -l cluster.x-k8s.io/control-plane='' \
+      --no-headers --output=jsonpath='{.items[0].metadata.name}') \
+      -o=jsonpath="{.status.addresses[?(@.type=='ExternalIP')].address}" | awk '{print $2}')
+

Fetch the talosconfig from the existing cluster:

  kubectl get talosconfig \
+    -n $CLUSTER_NAME \
+    -l cluster.x-k8s.io/cluster-name=$CLUSTER_NAME \
+    -o yaml -o jsonpath='{.items[0].status.talosConfig}' > $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml
+

Write in the configuration the endpoint IP and node IP:

  talosctl \
+    --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \
+    config endpoint $TALOS_ENDPOINT
+  talosctl \
+    --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \
+    config node $TALOS_ENDPOINT
+

Now that the talosconfig has been written, try listing all containers:

  <>
+  # removing ip; omit ` | sed ...` for regular use
+  talosctl --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml containers | sed -r 's/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b'/"x.x.x.x      "/
+

Here are the containers running on this particular node, in containerd (not k8s related):

NODE            NAMESPACE   ID         IMAGE                                  PID    STATUS
+x.x.x.x         system      apid       talos/apid                             3046   RUNNING
+x.x.x.x         system      etcd       gcr.io/etcd-development/etcd:v3.4.14   3130   RUNNING
+x.x.x.x         system      networkd   talos/networkd                         2879   RUNNING
+x.x.x.x         system      routerd    talos/routerd                          2888   RUNNING
+x.x.x.x         system      timed      talos/timed                            2976   RUNNING
+x.x.x.x         system      trustd     talos/trustd                           3047   RUNNING
+

Clean up

Tearing down the entire cluster and the resources associated with it can be achieved by:

i. Deleting the cluster:

  kubectl -n "$CLUSTER_NAME" delete cluster "$CLUSTER_NAME"
+

ii. Deleting the namespace:

  kubectl delete ns "$CLUSTER_NAME"
+

iii. Removing local configurations:

  rm \
+    $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \
+    $HOME/.kube/"$CLUSTER_NAME"
+

What have I learned from this?

  • (always learning) how wonderful the Kubernetes community is :: there are so many knowledgeable individuals who are so ready for collaboration and adoption - it doesn't matter the SIG or group.

  • how modular Cluster-API is :: Cluster-API components (bootstrap, controlPlane, core, infrastructure) can be swapped out and meshed together in very cool ways.

Credits

Integrating Talos into this project would not be possible without help from Andrew Rynhard (Talos Systems), huge thanks to him for reaching out for pairing and co-authoring.

Notes and references


Hope you've enjoyed the output of this project! +Thank you!

Footnotes

Recent Posts

\ No newline at end of file diff --git a/docs/feed.xml b/docs/feed.xml new file mode 100644 index 0000000..b5d0afc --- /dev/null +++ b/docs/feed.xml @@ -0,0 +1,409 @@ +http://localhost:4000clj-rss<html><head><meta charset="utf-8" /><meta content="width=device-width, initial-scale=1.0" name="viewport" /><meta content="" name="author" /><meta content="" name="description" /><title></title><link href="http://localhost:4000/static/css/firn_base.css" rel="stylesheet" /><link href="http://localhost:4000/static/css/ii.css" rel="stylesheet" /></head><body><main><article class="content"><div><div><section></section><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="the-ii-blog"><span class="firn-headline-text"><span>The ii Blog</span></span></h1><section><p><span>News, Views, and Tech How-To's from ii.</span></p></section></div></div></div></article><section id="recent"><h2>Recent Posts</h2><ul><li><a class="title" href="deploying-talos-to-equinix">Cluster-API + Talos + Equinix Metal</a><p>A guide to launching a highly-available cluster with Equinix and Talos</p><p><em>2021-01-21</em></p></li><li><a class="title" href="working-with-orgmode-tables">Working with Org-Mode Tables: Basics</a><p>An introduction to how to create/edit tables with Org-Mode.</p><p><em>2019-03-09</em></p></li></ul></section></main></body></html>Tue, 02 Feb 2021 00:00:00 +0000Indexindex<html><head><meta charset="utf-8" /><meta content="width=device-width, initial-scale=1.0" name="viewport" /><meta content="" name="author" /><meta content="" name="description" /><title></title><link href="http://localhost:4000/static/css/firn_base.css" rel="stylesheet" /><link href="http://localhost:4000/static/css/ii.css" rel="stylesheet" /></head><body><main><article class="content"><div><div><section></section><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="introduction"><span 
class="firn-headline-text"><span>Introduction</span></span></h1><section><p><span>In this guide we will launch a highly-available three Node Kubernetes cluster on Equinix Metal using Talos as the Node OS, as well as bootstrap, and controlPlane provider for Cluster-API.</span></p><ul><li><p><span>What is </span><a class="firn-external" href="https://cluster-api.sigs.k8s.io/" target="_blank">Cluster-API</a><span>? ::</span></p></li></ul><blockquote><p><span>Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters.</span></p></blockquote><ul><li><p><span>What is </span><a class="firn-external" href="https://www.talos.dev/" target="_blank">Talos</a><span>? ::</span></p></li></ul><blockquote><p><span>Talos is a modern OS designed to be secure, immutable, and minimal.</span></p></blockquote><ul><li><p><span>What is </span><a class="firn-external" href="https://metal.equinix.com/" target="_blank">Equinix Metal</a><span>? ::</span></p></li></ul><blockquote><p><span>A globally-available bare metal “as-a-service” that can be deployed and interconnected in minutes.</span></p></blockquote><p><span>The folks over at Equinix Metal have a wonderful heart for supporting Open Source communities.</span></p><ul><li><p><span>Why is this important? :: In general: Orchestrating a container based OS such as Talos (</span><a class="firn-external" href="http://flatcar-linux.org/" target="_blank">Flatcar</a><span>, </span><a class="firn-external" href="https://getfedora.org/coreos/" target="_blank">Fedora CoreOS</a><span>, or </span><a class="firn-external" href="https://rancher.com/products/rancher/" target="_blank">RancherOS</a><span>) shifts focus from the Nodes to the workloads. In terms of Talos: Currently the documentation for running an OS such as Talos in Equinix Metal for Kubernetes with Cluster-API is not so well documented and therefore inaccessible. 
It's important to fill in the gaps of knowledge.</span></p></li></ul></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="dependencies"><span class="firn-headline-text"><span>Dependencies</span></span></h1><section><p><span>What you'll need for this guide:</span></p><ul><li><p><a class="firn-external" href="https://github.com/talos-systems/talos/releases/tag/v0.8.1" target="_blank">talosctl</a></p></li><li><p><a class="firn-external" href="https://kubernetes.io/docs/tasks/tools/install-kubectl/" target="_blank">kubectl</a></p></li><li><p><a class="firn-external" href="https://github.com/packethost/packet-cli" target="_blank">packet-cli</a></p></li><li><p><span>the ID and API token of an existing Equinix Metal project</span></p></li><li><p><span>an existing Kubernetes cluster with a public IP (such as </span><a class="firn-external" href="http://kind.sigs.k8s.io/" target="_blank">kind</a><span>, </span><a class="firn-external" href="https://minikube.sigs.k8s.io/" target="_blank">minikube</a><span>, or a cluster already on Equinix Metal)</span></p></li></ul></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="prelimiary-steps"><span class="firn-headline-text"><span>Preliminary steps</span></span></h1><section><p><span>In order to talk to Equinix Metal, we'll export environment variables to configure resources and talk via </span><code>packet-cli</code><span>.</span></p><p><span>Set the correct project to create and manage resources in:</span></p><pre class="language-tmate"><code class="language-tmate"> read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID +</code></pre><p><span>The API key for your account or project:</span></p><pre class="language-tmate"><code class="language-tmate"> read -p 'PACKET_API_KEY: ' PACKET_API_KEY +</code></pre><p><span>Export the variables to be accessible from </span><code>packet-cli</code><span> and 
</span><code>clusterctl</code><span> later on:</span></p><pre class="language-tmate"><code class="language-tmate"> export PACKET_PROJECT_ID PACKET_API_KEY PACKET_TOKEN=$PACKET_API_KEY +</code></pre><p><span>In the existing cluster, a public LoadBalancer IP will be needed. I have already installed nginx-ingress in this cluster, which has got a Service with the cluster's elastic IP. +We'll need this IP address later for use in booting the servers. +If you have set up your existing cluster differently, it'll just need to be an IP that we can use.</span></p><pre class="language-tmate"><code class="language-tmate"> export LOAD_BALANCER_IP="$(kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')" +</code></pre></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="setting-up-cluster-api"><span class="firn-headline-text"><span>Setting up Cluster-API</span></span></h1><section><p><span>Install Talos providers for Cluster-API bootstrap and controlplane in your existing cluster:</span></p><pre class="language-tmate"><code class="language-tmate"> clusterctl init -b talos -c talos -i packet +</code></pre><p><span>This will install Talos's bootstrap and controlPlane controllers as well as the Packet / Equinix Metal infrastructure provider.</span></p><p><strong><strong><span>Important</span></strong></strong><span> note:</span></p><ul><li><p><span>the </span><code>bootstrap-talos</code><span> controller in the </span><code>cabpt-system</code><span> namespace must be running a version greater than </span><code>v0.2.0-alpha.8</code><span>. 
The version can be displayed in with </span><code>clusterctl upgrade plan</code><span> when it's installed.</span></p></li></ul></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="setting-up-matchbox"><span class="firn-headline-text"><span>Setting up Matchbox</span></span></h1><section><p><span>Currently, since Equinix Metal have </span><strong><strong><span>not</span></strong></strong><span> yet added support for Talos, it is necessary to install </span><a class="firn-external" href="https://matchbox.psdn.io/" target="_blank">Matchbox</a><span> to boot the servers (There is an </span><a class="firn-external" href="https://github.com/packethost/packet-images/issues/26" target="_blank">issue</a><span> in progress and </span><a class="firn-external" href="https://feedback.equinixmetal.com/operating-systems/p/talos-as-officially-supported-operating-system" target="_blank">feedback</a><span> for adding support).</span></p><ul><li><p><span>What is Matchbox? 
::</span></p></li></ul><blockquote><p><span>Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters.</span></p></blockquote><p><span>Here is the manifest for a basic matchbox installation:</span></p><pre class="language-yaml"><code class="language-yaml"> apiVersion: apps/v1 + kind: Deployment + metadata: + name: matchbox + spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + name: matchbox + template: + metadata: + labels: + name: matchbox + spec: + containers: + - name: matchbox + image: quay.io/poseidon/matchbox:v0.9.0 + env: + - name: MATCHBOX_ADDRESS + value: "0.0.0.0:8080" + - name: MATCHBOX_LOG_LEVEL + value: "debug" + ports: + - name: http + containerPort: 8080 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: 8080 + resources: + requests: + cpu: 30m + memory: 20Mi + limits: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: data + mountPath: /var/lib/matchbox + - name: assets + mountPath: /var/lib/matchbox/assets + volumes: + - name: data + hostPath: + path: /var/local/matchbox/data + - name: assets + hostPath: + path: /var/local/matchbox/assets + --- + apiVersion: v1 + kind: Service + metadata: + name: matchbox + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + spec: + type: LoadBalancer + selector: + name: matchbox + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 +</code></pre><p><span>Save it as </span><code>matchbox.yaml</code></p><p><span>The manifests above were inspired by the manifests in the </span><a class="firn-external" href="https://github.com/poseidon/matchbox/tree/master/contrib/k8s" target="_blank">matchbox repo</a><span>. 
+For production it might be wise to use:</span></p><ul><li><p><span>an Ingress with full TLS</span></p></li><li><p><span>a ReadWriteMany storage provider instead of hostPath for scaling</span></p></li></ul><p><span>With the manifests ready to go, we'll install Matchbox into the </span><code>matchbox</code><span> namespace on the existing cluster with the following commands:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl create ns matchbox + kubectl -n matchbox apply -f ./matchbox.yaml +</code></pre><p><span>You may need to patch the </span><code>Service.spec.externalIPs</code><span> to have an IP to access it from if one is not populated:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n matchbox patch \ + service matchbox \ + -p "{\"spec\":{\"externalIPs\":[\"$LOAD_BALANCER_IP\"]}}" +</code></pre><p><span>Once the pod is live, we'll need to create a directory structure for storing Talos boot assets:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + mkdir -p /var/lib/matchbox/{profiles,groups} /var/lib/matchbox/assets/talos +</code></pre><p><span>Inside the Matchbox container, we'll download the Talos boot assets for Talos version 0.8.1 into the assets folder:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + wget -P /var/lib/matchbox/assets/talos \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/initramfs-amd64.xz \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/vmlinuz-amd64 +</code></pre><p><span>Now that the assets have been downloaded, run a checksum against them to verify:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c "cd /var/lib/matchbox/assets/talos && \ + wget -O- 
https://github.com/talos-systems/talos/releases/download/v0.8.1/sha512sum.txt 2> /dev/null \ + | sed 's,_out/,,g' \ + | grep 'initramfs-amd64.xz\|vmlinuz-amd64' \ + | sha512sum -c -" +</code></pre><p><span>Since there's only one Pod in the Matchbox deployment, we'll export it's name to copy files into it:</span></p><pre class="language-tmate"><code class="language-tmate"> export MATCHBOX_POD_NAME=$(kubectl -n matchbox get pods -l name=matchbox -o=jsonpath='{.items[0].metadata.name}') +</code></pre><p><a class="firn-external" href="https://matchbox.psdn.io/matchbox/#profiles" target="_blank">Profiles in Matchbox</a><span> are JSON configurations for how the servers should boot, where from, and their kernel args. Save this file as </span><code>profile-default-amd64.json</code></p><pre class="language-json"><code class="language-json"> { + "id": "default-amd64", + "name": "default-amd64", + "boot": { + "kernel": "/assets/talos/vmlinuz-amd64", + "initrd": [ + "/assets/talos/initramfs-amd64.xz" + ], + "args": [ + "initrd=initramfs-amd64.xz", + "init_on_alloc=1", + "init_on_free=1", + "slub_debug=P", + "pti=on", + "random.trust_cpu=on", + "console=tty0", + "console=ttyS1,115200n8", + "slab_nomerge", + "printk.devkmsg=on", + "talos.platform=packet", + "talos.config=none" + ] + } + } +</code></pre><p><a class="firn-external" href="https://matchbox.psdn.io/matchbox/#groups" target="_blank">Groups in Matchbox</a><span> are a way of letting servers pick up profiles based on selectors. 
Save this file as </span><code>group-default-amd64.json</code></p><pre class="language-json"><code class="language-json"> { + "id": "default-amd64", + "name": "default-amd64", + "profile": "default-amd64", + "selector": { + "arch": "amd64" + } + } +</code></pre><p><span>We'll copy the profile and group into their respective folders:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n matchbox \ + cp ./profile-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/profiles/default-amd64.json + kubectl -n matchbox \ + cp ./group-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/groups/default-amd64.json +</code></pre><p><span>List the files to validate that they were written correctly:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c 'ls -alh /var/lib/matchbox/*/' +</code></pre></section><div class="firn-headline-section firn-headline-section-2"><h2 class="firn-headline firn-headline-2" id="testing-matchbox"><span class="firn-headline-text"><span>Testing Matchbox</span></span></h2><section><p><span>Using </span><code>curl</code><span>, we can verify Matchbox's running state:</span></p><pre class="language-tmate"><code class="language-tmate"> curl http://$LOAD_BALANCER_IP:8080 +</code></pre><p><span>To test matchbox, we'll create an invalid userdata configuration for Talos, saving as </span><code>userdata.txt</code><span>:</span></p><pre class="language-text"><code class="language-text">#!talos +</code></pre><p><span>Feel free to use a valid one.</span></p><p><span>Now let's talk to Equinix Metal to create a server pointing to the Matchbox server:</span></p><pre class="language-tmate"><code class="language-tmate"> packet-cli device create \ + --hostname talos-pxe-boot-test-1 \ + --plan c1.small.x86 \ + --facility sjc1 \ + --operating-system custom_ipxe \ + --project-id "$PACKET_PROJECT_ID" \ + --ipxe-script-url 
"http://$LOAD_BALANCER_IP:8080/ipxe?arch=amd64" \ + --userdata-file=./userdata.txt +</code></pre><p><span>In the meanwhile, we can watch the logs to see how things are:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n matchbox logs deployment/matchbox -f --tail=100 +</code></pre><p><span>Looking at the logs, there should be some get requests of resources that will be used to boot the OS.</span></p><p><span>Notes:</span></p><ul><li><p><span>fun fact: you can run Matchbox on Android using </span><a class="firn-external" href="https://f-droid.org/en/packages/com.termux/" target="_blank">Termux</a><span>.</span></p></li></ul></section></div></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="the-cluster"><span class="firn-headline-text"><span>The cluster</span></span></h1><div class="firn-headline-section firn-headline-section-2"><h2 class="firn-headline firn-headline-2" id="preparing-the-cluster"><span class="firn-headline-text"><span>Preparing the cluster</span></span></h2><section><p><span>Here we will declare the template that we will shortly generate our usable cluster from:</span></p><pre class="language-yaml"><code class="language-yaml"> kind: TalosControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + metadata: + name: "${CLUSTER_NAME}-control-plane" + spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + 
cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - https://github.com/packethost/packet-ccm/releases/download/v1.1.0/deployment.yaml + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + metadata: + name: "${CLUSTER_NAME}-control-plane" + spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] + --- + apiVersion: cluster.x-k8s.io/v1alpha3 + kind: Cluster + metadata: + name: "${CLUSTER_NAME}" + spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: "${CLUSTER_NAME}-control-plane" + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + metadata: + name: "${CLUSTER_NAME}" + spec: + projectID: "${PACKET_PROJECT_ID}" + facility: "${FACILITY}" + --- + apiVersion: cluster.x-k8s.io/v1alpha3 + kind: MachineDeployment + 
metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + --- + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + metadata: + name: ${CLUSTER_NAME}-worker-a + spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] + --- + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + spec: + template: + spec: + generateType: init +</code></pre><p><span>Inside of </span><code>TalosControlPlane.spec.controlPlaneConfig.init</code><span>, I'm very much liking the use of </span><code>generateType: init</code><span> paired with </span><code>configPatches</code><span>. 
This enables:</span></p><ul><li><p><span>configuration to be generated;</span></p></li><li><p><span>management of certificates out of the cluster operator's hands;</span></p></li><li><p><span>another level of standardisation; and</span></p></li><li><p><span>overrides to be added where needed</span></p></li></ul><p><span>Notes:</span></p><ul><li><p><span>the ClusterAPI template above uses Packet-Cloud-Controller manager version 1.1.0</span></p></li></ul></section><div class="firn-headline-section firn-headline-section-3"><h3 class="firn-headline firn-headline-3" id="templating-your-configuration"><span class="firn-headline-text"><span>Templating your configuration</span></span></h3><section><p><span>Set environment variables for configuration:</span></p><pre class="language-bash"><code class="language-bash"> <<cluster-config-env-name>> + export FACILITY=sjc1 + export KUBERNETES_VERSION=v1.20.2 + export POD_CIDR=10.244.0.0/16 + export SERVICE_CIDR=10.96.0.0/12 + export CONTROLPLANE_NODE_TYPE=c1.small.x86 + export CONTROL_PLANE_MACHINE_COUNT=3 + export WORKER_NODE_TYPE=c1.small.x86 + export WORKER_MACHINE_COUNT=0 + export SSH_KEY="" + export IPXE_URL=$LOAD_BALANCER_IP +</code></pre><p><span>In the variables above, we will create a cluster which has three small controlPlane nodes to run workloads.</span></p></section></div><div class="firn-headline-section firn-headline-section-3"><h3 class="firn-headline firn-headline-3" id="render-the-manifests"><span class="firn-headline-text"><span>Render the manifests</span></span></h3><section><p><span>Render your cluster configuration from the template:</span></p><pre class="language-tmate"><code class="language-tmate"> clusterctl config cluster "$CLUSTER_NAME" \ + --from ./talos-packet-cluster-template.yaml \ + -n "$CLUSTER_NAME" > "$CLUSTER_NAME"-cluster-capi.yaml +</code></pre></section></div></div><div class="firn-headline-section firn-headline-section-2"><h2 class="firn-headline firn-headline-2" 
id="creating-the-cluster"><span class="firn-headline-text"><span>Creating the cluster</span></span></h2><section><p><span>With the template for the cluster rendered to how we wish to deploy it, it's now time to apply it:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl create ns "$CLUSTER_NAME" + kubectl -n "$CLUSTER_NAME" apply -f ./"$CLUSTER_NAME"-cluster-capi.yaml +</code></pre><p><span>The cluster will now be brought up, we can see the progress by taking a look at the resources:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n "$CLUSTER_NAME" get machines,clusters,packetmachines,packetclusters +</code></pre><p><span>Note: As expected, the cluster may take some time to appear and be accessible.</span></p><p><span>Not long after applying, a KubeConfig is available. Fetch the KubeConfig from the existing cluster with:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n "$CLUSTER_NAME" get secrets \ + "$CLUSTER_NAME"-kubeconfig -o=jsonpath='{.data.value}' \ + | base64 -d > $HOME/.kube/"$CLUSTER_NAME" +</code></pre><p><span>Using the KubeConfig from the new cluster, check out the status of it:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" cluster-info +</code></pre><p><span>Once the APIServer is reachable, create configuration for how the Packet-Cloud-Controller-Manager should talk to Equinix-Metal:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" -n kube-system \ + create secret generic packet-cloud-config \ + --from-literal=cloud-sa.json="{\"apiKey\": \"${PACKET_API_KEY}\",\"projectID\": \"${PACKET_PROJECT_ID}\"}" +</code></pre><p><span>Since we're able to talk to the APIServer, we can check how all Pods are doing:</span></p><pre class="language-bash"><code class="language-bash"> <<cluster-config-env-name>> + kubectl --kubeconfig 
$HOME/.kube/"$CLUSTER_NAME"\ + -n kube-system get pods +</code></pre><p><span>Listing Pods shows that everything is live and in a good state:</span></p><pre class="language-bash"><code class="language-bash">NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-5b55f9f688-fb2cb 1/1 Running 0 25m +kube-system coredns-5b55f9f688-qsvg5 1/1 Running 0 25m +kube-system kube-apiserver-665px 1/1 Running 0 19m +kube-system kube-apiserver-mz68q 1/1 Running 0 19m +kube-system kube-apiserver-qfklt 1/1 Running 2 19m +kube-system kube-controller-manager-6grxd 1/1 Running 0 19m +kube-system kube-controller-manager-cf76h 1/1 Running 0 19m +kube-system kube-controller-manager-dsmgf 1/1 Running 0 19m +kube-system kube-flannel-brdxw 1/1 Running 0 24m +kube-system kube-flannel-dm85d 1/1 Running 0 24m +kube-system kube-flannel-sg6k9 1/1 Running 0 24m +kube-system kube-proxy-flx59 1/1 Running 0 24m +kube-system kube-proxy-gbn4l 1/1 Running 0 24m +kube-system kube-proxy-ns84v 1/1 Running 0 24m +kube-system kube-scheduler-4qhjw 1/1 Running 0 19m +kube-system kube-scheduler-kbm5z 1/1 Running 0 19m +kube-system kube-scheduler-klsmp 1/1 Running 0 19m +kube-system packet-cloud-controller-manager-77cd8c9c7c-cdzfv 1/1 Running 0 20m +kube-system pod-checkpointer-4szh6 1/1 Running 0 19m +kube-system pod-checkpointer-4szh6-talos-metal-control-plane-j29lb 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h-talos-metal-control-plane-lk9f2 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh-talos-metal-control-plane-h9v4j 1/1 Running 0 19m +</code></pre><p><span>With the cluster live, it's now ready for workloads to be deployed!</span></p></section></div></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="talos-configuration"><span class="firn-headline-text"><span>Talos Configuration</span></span></h1><section><p><span>In 
order to manage Talos Nodes outside of Kubernetes, we need to create and set up configuration to use.</span></p><p><span>Create the directory for the config:</span></p><pre class="language-tmate"><code class="language-tmate"> mkdir -p $HOME/.talos +</code></pre><p><span>Discover the IP for the first controlPlane:</span></p><pre class="language-tmate"><code class="language-tmate"> export TALOS_ENDPOINT=$(kubectl -n "$CLUSTER_NAME" \ + get machines \ + $(kubectl -n "$CLUSTER_NAME" \ + get machines -l cluster.x-k8s.io/control-plane='' \ + --no-headers --output=jsonpath='{.items[0].metadata.name}') \ + -o=jsonpath="{.status.addresses[?(@.type=='ExternalIP')].address}" | awk '{print $2}') +</code></pre><p><span>Fetch the </span><code>talosconfig</code><span> from the existing cluster:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl get talosconfig \ + -n $CLUSTER_NAME \ + -l cluster.x-k8s.io/cluster-name=$CLUSTER_NAME \ + -o yaml -o jsonpath='{.items[0].status.talosConfig}' > $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml +</code></pre><p><span>Write in the configuration the endpoint IP and node IP:</span></p><pre class="language-tmate"><code class="language-tmate"> talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config endpoint $TALOS_ENDPOINT + talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config node $TALOS_ENDPOINT +</code></pre><p><span>Now that the </span><code>talosconfig</code><span> has been written, try listing all containers:</span></p><pre class="language-bash"><code class="language-bash"> <<cluster-config-env-name>> + # removing ip; omit ` | sed ...` for regular use + talosctl --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml containers | sed -r 's/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b'/"x.x.x.x "/ +</code></pre><p><span>Here's the containers running on this particular node, in containerd (not k8s 
related):</span></p><pre class="language-bash"><code class="language-bash">NODE NAMESPACE ID IMAGE PID STATUS +x.x.x.x system apid talos/apid 3046 RUNNING +x.x.x.x system etcd gcr.io/etcd-development/etcd:v3.4.14 3130 RUNNING +x.x.x.x system networkd talos/networkd 2879 RUNNING +x.x.x.x system routerd talos/routerd 2888 RUNNING +x.x.x.x system timed talos/timed 2976 RUNNING +x.x.x.x system trustd talos/trustd 3047 RUNNING +</code></pre></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="clean-up"><span class="firn-headline-text"><span>Clean up</span></span></h1><section><p><span>Tearing down the entire cluster and resources associated with it, can be achieved by</span></p><p><span>i. Deleting the cluster:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl -n "$CLUSTER_NAME" delete cluster "$CLUSTER_NAME" +</code></pre><p><span>ii. Deleting the namespace:</span></p><pre class="language-tmate"><code class="language-tmate"> kubectl delete ns "$CLUSTER_NAME" +</code></pre><p><span>iii. 
Removing local configurations:</span></p><pre class="language-tmate"><code class="language-tmate"> rm \ + $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + $HOME/.kube/"$CLUSTER_NAME" +</code></pre></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="what-have-i-learned-from-this?"><span class="firn-headline-text"><span>What have I learned from this?</span></span></h1><section><ul><li><p><span>(always learning) how wonderful the Kubernetes community is :: there are so many knowledgable individuals who are so ready for collaboration and adoption - it doesn't matter the SIG or group.</span></p></li><li><p><span>how modular Cluster-API is :: Cluster-API components (bootstrap, controlPlane, core, infrastructure) can be swapped out and meshed together in very cool ways.</span></p></li></ul></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="credits"><span class="firn-headline-text"><span>Credits</span></span></h1><section><p><span>Integrating Talos into this project would not be possible without help from </span><a class="firn-external" href="https://github.com/andrewrynhard" target="_blank">Andrew Rynhard (Talos Systems)</a><span>, huge thanks to him for reaching out for pairing and co-authoring.</span></p></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="notes-and-references"><span class="firn-headline-text"><span>Notes and references</span></span></h1><section><ul><li><p><span>with the new cluster's controlPlane live and available for deployment, the iPXE server could be moved into that cluster - meaning that new servers boot from the cluster that they'll join, making it almost self-contained</span></p></li><li><p><span>cluster configuration as based off of </span><a class="firn-external" 
href="https://github.com/kubernetes-sigs/cluster-api-provider-packet/blob/479faf06e1337b1e979cb624ca8be015b2a89cde/templates/cluster-template.yaml" target="_blank">cluster-template.yaml from the cluster-api-provider-packet repo</a></p></li><li><p><span>this post has been made to </span><a class="firn-external" href="https://blog.calebwoodbine.com/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal" target="_blank">blog.calebwoodine.com</a><span>, and </span><a class="firn-external" href="https://ii.coop/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal/" target="_blank">talos-system.com/blog</a><span>, but is also available as an </span><a class="firn-external" href="https://github.com/ii/org/blob/master/ii/equinix-metal-capi-talos-kubernetes/README.org" target="_blank">Org file</a></p></li></ul><hr /><p><span>Hope you've enjoyed the output of this project! +Thank you!</span></p></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="footnotes"><span class="firn-headline-text"><span>Footnotes</span></span></h1><section></section></div></div></div></article><section id="recent"><h2>Recent Posts</h2><ul><li><a class="title" href="deploying-talos-to-equinix">Cluster-API + Talos + Equinix Metal</a><p>A guide to launching a highly-available cluster with Equinix and Talos</p><p><em>2021-01-21</em></p></li><li><a class="title" href="working-with-orgmode-tables">Working with Org-Mode Tables: Basics</a><p>An introduction to how to create/edit tables with Org-Mode.</p><p><em>2019-03-09</em></p></li></ul></section></main></body></html>Thu, 21 Jan 2021 00:00:00 +0000Cluster-API + Talos + Equinix Metaldeploying-talos-to-equinix<html><head><meta charset="utf-8" /><meta content="width=device-width, initial-scale=1.0" name="viewport" /><meta content="" name="author" /><meta content="" name="description" /><title></title><link href="http://localhost:4000/static/css/firn_base.css" 
rel="stylesheet" /><link href="http://localhost:4000/static/css/ii.css" rel="stylesheet" /></head><body><main><article class="content"><div><div><section></section><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="overview"><span class="firn-headline-text"><span>Overview</span></span></h1><section><p><span>Tables are a great way to document related information in a format that's easy to scan. +This document is an introduction to how to create/edit tables with Org-Mode.</span></p></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="table-structure"><span class="firn-headline-text"><span>Table Structure</span></span></h1><section><p><span>Using the </span><code>|</code><span> character, Org-Mode uses it to form the various cells (fields) within a table.</span></p></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="table-navigation"><span class="firn-headline-text"><span>Table Navigation</span></span></h1><section><p><span>Navigating around the table is simple. The </span><code>TAB</code><span> key will move the cursor forward, while using </span><code>Shift-TAB</code><span> combination will move the cursor backwards through the table. +To remove any concerns about the layout of the table, using the </span><code>TAB</code><span> key will normally </span><em><span>re-align</span></em><span> the table automatically. 
+While in the </span><em><span>insert mode</span></em><span> you can use the </span><code>Enter</code><span> key, also called the </span><em><span>return</span></em><span> key (</span><code>RET</code><span>) to move to the next row of the table.</span></p></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="working-with-columns"><span class="firn-headline-text"><span>Working with Columns</span></span></h1><section><p><span>In the following tables, the keyboard sequence </span><code>M</code><span> is for the </span><em><span>meta</span></em><span> key, which is normally the </span><code>Alt</code><span> key on the standard PC keyboard. The </span><code>S</code><span> is shorthand for the </span><code>Shift</code><span> key. +References to </span><code>LEFT</code><span>, </span><code>RIGHT</code><span>, </span><code>UP</code><span>, </span><code>DOWN</code><span> relate to the arrow keys.</span></p></section><div class="firn-headline-section firn-headline-section-2"><h2 class="firn-headline firn-headline-2" id="moving-columns-around"><span class="firn-headline-text"><span>Moving columns around</span></span></h2><section><table><tr><td><strong><span>Keyboard Sequence</span></strong></td><td><strong><span>Effect</span></strong></td></tr><tr></tr><tr><td><code>M-LEFT</code></td><td><span>Move the current column left</span></td></tr><tr><td><code>M-RIGHT</code></td><td><span>Move the current column right</span></td></tr></table></section></div><div class="firn-headline-section firn-headline-section-2"><h2 class="firn-headline firn-headline-2" id="addremove-a-column"><span class="firn-headline-text"><span>Add/Remove a column</span></span></h2><section><table><tr><td><strong><span>Keyboard Sequence</span></strong></td><td><strong><span>Effect</span></strong></td></tr><tr></tr><tr><td><code>M-S-LEFT</code></td><td><span>Add another column to the 
table</span></td></tr><tr><td><code>M-S-RIGHT</code></td><td><span>Remove the current focused column</span></td></tr></table></section></div></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="working-with-rows"><span class="firn-headline-text"><span>Working with Rows</span></span></h1><div class="firn-headline-section firn-headline-section-2"><h2 class="firn-headline firn-headline-2" id="moving-them-around"><span class="firn-headline-text"><span>Moving them around</span></span></h2><section><table><tr><td><strong><span>Keyboard Sequence</span></strong></td><td><strong><span>Effect</span></strong></td></tr><tr></tr><tr><td><code>M-UP</code></td><td><span>Move the current row up</span></td></tr><tr><td><code>M-DOWN</code></td><td><span>Move the current row down</span></td></tr></table></section></div><div class="firn-headline-section firn-headline-section-2"><h2 class="firn-headline firn-headline-2" id="addremove-a-row"><span class="firn-headline-text"><span>Add/Remove a row</span></span></h2><section><table><tr><td><strong><span>Keyboard Sequence</span></strong></td><td><strong><span>Effect</span></strong></td></tr><tr></tr><tr><td><code>M-S-UP</code></td><td><span>Remove the current focused row</span></td></tr><tr><td><code>M-S-DOWN</code></td><td><span>Add another row to the table</span></td></tr></table><p><span>When the cursor is at the end of the table, using the </span><code>Enter</code><span> key in </span><em><span>insert mode</span></em><span> </span><strong><span>or</span></strong><span> </span><code>TAB</code><span> key will add another row to the table.</span></p></section></div></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="horizontal-lines"><span class="firn-headline-text"><span>Horizontal lines</span></span></h1><section><table><tr><td><strong><span>Keyboard 
Sequence</span></strong></td><td><strong><span>Effect</span></strong></td></tr><tr></tr><tr><td><code>C-c</code><span> </span><code>-</code></td><td><span>Add a horizontal line and keep the cursor at the current position</span></td></tr><tr><td><code>C-c</code><span> </span><code>RET</code></td><td><span>Add a horizontal line and then move the cursor below the new line</span></td></tr></table><p><strong><span>Note:</span></strong><span> In Org-Mode it's common to use the keyboard sequence, </span><em><span>control</span></em><span> (</span><code>C</code><span>) key with the letter </span><code>c</code><span> before following it with another key.</span></p></section></div><div class="firn-headline-section firn-headline-section-1"><h1 class="firn-headline firn-headline-1" id="references"><span class="firn-headline-text"><span>References</span></span></h1><section><ul><li><p><a class="firn-external" href="https://orgmode.org/manual/Built_002din-table-editor.html" target="_blank">Orgmode.org: The built-in table editor</a></p></li></ul></section></div></div></div></article><section id="recent"><h2>Recent Posts</h2><ul><li><a class="title" href="deploying-talos-to-equinix">Cluster-API + Talos + Equinix Metal</a><p>A guide to launching a highly-available cluster with Equinix and Talos</p><p><em>2021-01-21</em></p></li><li><a class="title" href="working-with-orgmode-tables">Working with Org-Mode Tables: Basics</a><p>An introduction to how to create/edit tables with Org-Mode.</p><p><em>2019-03-09</em></p></li></ul></section></main></body></html>Sat, 09 Mar 2019 00:00:00 +0000Working with Org-Mode Tables: Basicsworking-with-orgmode-tables \ No newline at end of file diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..74df396 --- /dev/null +++ b/docs/index.html @@ -0,0 +1 @@ +

The ii Blog

News, Views, and Tech How-To's from ii.

Recent Posts

\ No newline at end of file diff --git a/docs/static/css/firn_base.css b/docs/static/css/firn_base.css new file mode 100644 index 0000000..bd6a694 --- /dev/null +++ b/docs/static/css/firn_base.css @@ -0,0 +1,538 @@ +/* + * Firn's default style sheet uses a modified version of: + * MVP.css v1.6.2 - https://github.com/andybrewer/mvp + * Change the default variables below to find a styling you like. + */ + +/* Reset / Overrides */ +*, +*::before, +*::after { + box-sizing: border-box; +} + +/* CSS Variables (light mode) */ + +:root { + --border-radius: 4px; + --box-shadow: 2px 2px 10px; + --color-primary: #3498db; + --color-primary-accent: #3498db14; + --color-bg: #fff; + --color-bg-secondary: #e9e9e9; + --color-secondary: #222; + --color-secondary-accent: #222ab; + --color-shadow: #f4f4f4; + --color-text: #444; + --color-text-secondary: #999; + --font-family: "Arial", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, + Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; + --hover-brightness: 1.2; + --justify-important: center; + --justify-normal: left; + --line-height: 1.65; + --width-card-wide: 800px; + --width-content: 48rem; + + /* Currently, Firn only supports 'todo | done' keywords. */ + --keyword-active: #2ecc71; /* For "active" Todo keywords. 
*/ + --keyword-done: #bdc3c7; /* For "done type keywords" */ + --keyword-size: xx-small; + + /* Headline priorities ([#A] [#B] etc) */ + --priority-a-bg: #c0392b; + --priority-b-bg: #3498db; + --priority-c-bg: #2ecc71; + --priority-size: xx-small; + + /* Headline Tags (:my-tag: :unfinished: etc)*/ + --tag-size: x-small; + --tag-bg: #ecf0f1; +} + +/* CSS Variables (dark mode) */ +@media (prefers-color-scheme: dark) { + :root { + --color-primary: #0097fc; + --color-primary-accent: #555; + --color-bg: #333; + --color-bg-secondary: #555; + --color-shadow: #bbbbbb20; + --color-secondary: #efefef; + --color-secondary-accent: #efefef; + --color-text: #f7f7f7; + --color-text-secondary: #aaa; + --tag-bg: #555; + } +} + +/* Layout */ + +body { + background: var(--color-bg); + color: var(--color-text); + font-family: var(--font-family); + line-height: var(--line-height); + font-size: 15px; + margin: 0; + overflow-x: hidden; +} + +footer, +main { + margin: 0 auto; +} + +.content { + max-width: var(--width-content); + margin: 0 auto; + padding: 32px; +} + +hr { + margin: 2rem 0; + border-top: 1px solid var(--border-col); + border-bottom: none; +} + +/* Headers */ +article header, +div header, +main header { + padding-top: 0; +} + +/* Typography */ +code, +samp { + background-color: var(--color-primary-accent); + border-radius: var(--border-radius); + color: var(--color-text); + display: inline-block; + margin: 0 0.1rem; + padding: 0 0.5rem; + font-size: 0.85em; +} + +details { + margin: 1.3rem 0; +} + +details summary { + font-weight: bold; + cursor: pointer; + padding: 0px; +} + +h1, +h2, +h3, +h4, +h5, +h6 { + line-height: var(--line-height); + margin: 1em 0; +} + +mark { + padding: 0.1rem; +} + +ol, +ul { + margin: 0.125rem 0; + padding-left: 1.25rem; +} + +ol li, +ul li { + padding: 0.125rem 0; +} + +p { + margin: 1rem 0; + padding: 0; +} + +pre { + margin: 1rem 0; + padding: 1rem 0; +} + +pre code, +pre samp { + display: block; + padding: 0.5rem 1rem; + overflow-x: auto; 
+} + +small { + color: var(--color-text-secondary); +} + +sup { + background-color: var(--color-primary); + border-radius: var(--border-radius); + color: var(--color-bg); + font-size: xx-small; + margin: 0.2rem; + padding: 0.2rem 0.3rem; + position: relative; + top: -2px; +} + +a { + color: var(--color-secondary); + text-decoration: none; +} + +a:hover { + filter: brightness(var(--hover-brightness)); + text-decoration: underline; +} + +/* -- Images */ + +figure { + margin: 0; + padding: 0; +} + +figure img { + max-width: 100%; +} + +figure figcaption { + color: var(--color-text-secondary); +} + +/* -- Tables */ +table { + border: 1px solid var(--color-bg-secondary); + border-radius: var(--border-radius); + border-spacing: 0; + display: inline-block; + max-width: 100%; + overflow-x: auto; + padding: 0; + white-space: nowrap; +} + +table td, +table th, +table tr { + padding: 0.4rem 0.8rem; + font-size: 0.9em; +} + +/* Firn currently can't discern if a table has a ,*/ +/* so we style the first element.*/ +table tr:first-child { + background-color: var(--color-primary); + border-collapse: collapse; + border-radius: var(--border-radius); + color: var(--color-bg); + margin: 0; + padding: 0; +} + +table thead th:first-child { + border-top-left-radius: var(--border-radius); +} + +table thead th:last-child { + border-top-right-radius: var(--border-radius); +} + +table thead th:first-child, +table tr td:first-child { + text-align: var(--justify-normal); +} + +table tr:nth-child(even) { + background-color: var(--color-primary-accent); +} + +blockquote { + display: block; + font-style: italic; + line-height: var(--line-height); + padding: 1.0rem; + background: var(--color-primary-accent); + margin: 0; + border-left: 2px solid var(--color-primary); +} + +/* -- Firn Org Tweaks ---- */ + +/* Because firn spits out nested content (for example, the parser spits out p + tags within li tags), sometimes you may need to style the children of html + elements to get the spacing you 
like.*/ + +li > p { + margin: 0; + padding: 0; +} + +/* -- Firn "Components" ---- */ + +/* Images */ +.firn-img-with-caption { + display: flex; + flex-direction: column; +} + +.firn-img-with-caption img { + align-self: flex-start; +} + +.firn-img-caption { + font-style: italic; +} + +/* Headline Keywords and Priority */ + +/* This applies to ALL h1-h6 */ +.firn-headline { + display: flex; + align-items: center; /* center align useful for aligning tags + keywords */ +} + +.firn-headline-section { + margin-bottom: 32px; +} + +.firn-headline-priority { + background-color: var(--color-secondary); + border-radius: var(--border-radius); + color: white; + font-size: xx-small; + font-weight: bold; + margin-right: 0.5rem; + padding: 0.2rem 0.3rem; + font-size: var(--priority-size); +} + +.firn-headline-priority__A { + background-color: var(--priority-a-bg); +} + +.firn-headline-priority__B { + background-color: var(--priority-b-bg); + font-size: var(--priority-size); +} + +.firn-headline-priority__C { + background-color: var(--priority-c-bg); + font-size: var(--priority-size); +} + +.firn-headline-keyword__DONE { + background-color: var(--keyword-done); + border-radius: var(--border-radius); + color: var(--color-bg); + font-size: var(--keyword-size); + font-weight: bold; + margin-right: 0.5rem; + padding: 0.2rem 0.3rem; + position: relative; +} + +.firn-headline-keyword__TODO { + background-color: var(--keyword-active); + border-radius: var(--border-radius); + color: var(--color-bg); + font-size: xx-small; + font-weight: bold; + margin-right: 0.5rem; + padding: 0.2rem 0.3rem; + position: relative; +} + +.firn-headline-text { + display: flex; + flex: 1; + align-items: center; +} + +.firn-cookie { +} + +/* Org Footnotes */ + +.firn-footnote-ref { +} + +.firn-footnote-def { +} + +/* Org Property Drawers */ + +.firn-properties { + background-color: var(--color-bg-secondary); +} + +.firn-property-key { +} + +.firn-property-value { +} + +/* Styling for internal and external links 
*/ + +.firn-external { +} + +.firn-internal { +} + +/* If you choose to render :PROPERTIES: drawers, you may style them: */ +.firn-properties { + background: var(--color-bg-secondary); + padding: 8px; +} +.firn-property-key { +} +.firn-property-value { +} + +/* -- Folding ---------------------------------------------------------------- */ + +/* Folding leverages html
and tags to emulate the folding + feature of org-mode. This is a bit hacky; we hide the regular heading (so + that it still exists so it is anchorable, still has tags, priority etc). +*/ + +/* Headline text is hidden when using folding; it is instead rendered in a + tag. */ +.firn-headline-text-hidden { + visibility: collapse; + overflow: auto; + flex: 1; +} + +/* This moves tags, priority, other non-content text in headings up to + approximately where the tag is. */ +.firn-headline-hidden { + margin-top: -2.6em; + justify-content: right; + display: flex; +} + +/* If folding, we have to style the tags to make them look like headings. */ + +/* Set the parent element, so we can size-down on mobile. */ +.firn-fold { + font-size: 15px; +} +.firn-headline-summary-1 { + font-size: 1.3em; + margin: 1em 0; +} +.firn-headline-summary-2 { + font-size: 1.15em; + margin: 1em 0; +} +.firn-headline-summary-3 { + font-size: 1.05em; + margin: 1em 0; +} +.firn-headline-summary-4 { + font-size: 1em; + margin: 1em 0; +} +.firn-headline-summary-5 { + font-size: 0.85em; + margin: 1em 0; +} +.firn-headline-summary-6 { + font-size: 0.75em; + margin: 1em 0; +} + +@media (max-width: 768px) { + .firn-fold { + font-size: 12px; + } +} + +/* Sitemap, breadcrumbs, siblings files, related files. ---------------------- */ + +.firn-sitemap { +} + +.firn-sitemap-item--parent { + list-style-type: none; +} + +.firn-breadcrumbs { +} + +.firn-file-navigation { + display: flex; + justify-content: space-between; +} + +/* Tags (Firn / Org tags) ---------------------------------------------------- */ + +/* There are two types of tags when using Firn: "org-tags" and "firn-tags" +/* org-tags are your standard org-mode heading tags: ":mytag:"*/ +/* firn-tags are file-based tags that allows tagging entire files.*/ + +/* Firn tags (tags that are per file basis.) 
*/ + +.firn-file-tags { +} +.firn-file-tags-container { +} +.firn-file-tag-name { + font-size: 1.2em; + margin: 1em 0; + text-transform: capitalize; +} +.firn-file-tag-list { +} +.firn-file-tag-item { +} +.firn-file-tag-link { +} + +/* Org tags (tags that are per heading basis.) */ + +/* How an org tag (ex - `:mytag:`) looks when rendered alongside an org heading. */ +.firn-org-tag { + background-color: var(--tag-bg); + border-radius: var(--border-radius); + color: var(--color-text); + font-size: var(--tag-size); + font-weight: 400; + margin: 0 4px; + padding: 0.2rem 0.4rem; +} + +.firn-org-tag:hover { + filter: none; +} + +/* Optional: Adds a "#" before an org-tag */ +.firn-org-tag::before { + content: "#"; +} + +/* The following css classes style the result of rendering your org-tags list via*/ +/* `(render :org-tags)` */ +.firn-org-tags { +} +.firn-org-tags-container { +} +.firn-org-tag-name { + font-size: 1.2em; + margin: 1em 0; + text-transform: capitalize; +} +.firn-org-tag-list { +} +.firn-org-tag-item { +} +.firn-org-tag-link { +} diff --git a/docs/static/css/ii.css b/docs/static/css/ii.css new file mode 100644 index 0000000..e9ade62 --- /dev/null +++ b/docs/static/css/ii.css @@ -0,0 +1,31 @@ +:root { + --cA: aliceblue; + --cB: midnightblue; + --cC: #fff1bc; +} +body { + background: var(--cA); + color: var(--cB); +} + +section#recent { + max-width: var(--width-content); + margin: 0 auto; + padding: 32px; +} + +section#recent ul { + display: flex; + flex-wrap: wrap; + padding: 0; +} +section#recent li { + max-width: 33%; + list-style-type: none; + background: var(--cC); + padding: 1rem; + margin: 0.25rem; +} +section#recent a.title { + font-size: 1.5rem; +} diff --git a/docs/tags.html b/docs/tags.html new file mode 100644 index 0000000..2542e20 --- /dev/null +++ b/docs/tags.html @@ -0,0 +1 @@ +

Org Tags


File Tags

\ No newline at end of file diff --git a/docs/working-with-orgmode-tables.html b/docs/working-with-orgmode-tables.html new file mode 100644 index 0000000..62b10b0 --- /dev/null +++ b/docs/working-with-orgmode-tables.html @@ -0,0 +1,5 @@ +

Overview

Tables are a great way to document related information in a format that's easy to scan. +This document is an introduction to how to create/edit tables with Org-Mode.

Table Structure

Org-Mode uses the | character to form the various cells (fields) within a table.

Table Navigation

Navigating around the table is simple. The TAB key will move the cursor forward, while using the Shift-TAB combination will move the cursor backwards through the table. +To remove any concerns about the layout of the table, using the TAB key will normally re-align the table automatically. +While in the insert mode you can use the Enter key, also called the return key (RET) to move to the next row of the table.

Working with Columns

In the following tables, the keyboard sequence M is for the meta key, which is normally the Alt key on the standard PC keyboard. The S is shorthand for the Shift key. +References to LEFT, RIGHT, UP, DOWN relate to the arrow keys.

Moving columns around

Keyboard SequenceEffect
M-LEFTMove the current column left
M-RIGHTMove the current column right

Add/Remove a column

Keyboard SequenceEffect
M-S-LEFTRemove the current focused column
M-S-RIGHTAdd another column to the table

Working with Rows

Moving them around

Keyboard SequenceEffect
M-UPMove the current row up
M-DOWNMove the current row down

Add/Remove a row

Keyboard SequenceEffect
M-S-UPRemove the current focused row
M-S-DOWNAdd another row to the table

When the cursor is at the end of the table, using the Enter key in insert mode or the TAB key will add another row to the table.

Horizontal lines

Keyboard SequenceEffect
C-c -Add a horizontal line and keep the cursor at the current position
C-c RETAdd a horizontal line and then move the cursor below the new line

Note: In Org-Mode it's common to use the keyboard sequence, control (C) key with the letter c before following it with another key.

Recent Posts

\ No newline at end of file diff --git a/elfeed.org b/elfeed.org new file mode 100644 index 0000000..43b741e --- /dev/null +++ b/elfeed.org @@ -0,0 +1,10 @@ +#+TITLE: Our RSS Feeds +This is intended to be used with the doom emacs rss module, with +org enabled +* root :elfeed: +** Envoy :envoy: +*** [[https://blog.envoyproxy.io/feed][Envoy Proxy Blog]] +*** [[https://medium.com/@htuch][Harvey Tuch Blog]] +** Programming :programming: +*** [[https://this-week-in-rust.org/rss.xml][This Week in Rust]] :rust: +** CNCF :cncf: +*** [[https://www.aniszczyk.org/rss][Chris Aniczyck's Blog]] :envoy: diff --git a/emacs/forge.org b/emacs/forge.org index 2a92bf2..d543a7a 100644 --- a/emacs/forge.org +++ b/emacs/forge.org @@ -105,6 +105,5 @@ index 7c7fef8db3..226b9df6f3 100644 # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/emacs/togetherly.org b/emacs/togetherly.org index 7267e09..5c2479f 100644 --- a/emacs/togetherly.org +++ b/emacs/togetherly.org @@ -49,6 +49,5 @@ Then Restart emacs / reload spacemacs # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/github/cncf/apisnoop/issues/84_cleanup-apisnoop-box.org b/github/cncf/apisnoop/issues/84_cleanup-apisnoop-box.org index baaf676..48e4f00 100644 --- a/github/cncf/apisnoop/issues/84_cleanup-apisnoop-box.org +++ b/github/cncf/apisnoop/issues/84_cleanup-apisnoop-box.org @@ -204,6 +204,5 @@ We realized that, instead of cleaning up the existing box, it would be more prod # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git 
a/github/cncf/apisnoop/issues/89.org b/github/cncf/apisnoop/issues/89.org index 73973ed..2aaa7e4 100644 --- a/github/cncf/apisnoop/issues/89.org +++ b/github/cncf/apisnoop/issues/89.org @@ -169,6 +169,5 @@ I am not sure what's the best way to manage this for long-term reliability and s # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/github/cncf/apisnoop/webui/backend.org b/github/cncf/apisnoop/webui/backend.org index 4311777..9ddc37e 100644 --- a/github/cncf/apisnoop/webui/backend.org +++ b/github/cncf/apisnoop/webui/backend.org @@ -201,5 +201,4 @@ We only have a single method here, which is setup. We don't want this to be an :END: # Local Variables: -# org-confirm-babel-evaluate: nil # End: diff --git a/gpg.org b/gpg.org index 0684d63..9f3b000 100644 --- a/gpg.org +++ b/gpg.org @@ -87,6 +87,5 @@ sub elg1024 2019-01-28 [E] # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/blog-infra.org b/ii/blog-infra.org index d9ac622..696e622 100644 --- a/ii/blog-infra.org +++ b/ii/blog-infra.org @@ -478,6 +478,5 @@ gitlab.ii.coop | 2019-02-12T03:23:22.020131991Z - create symlink at # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/cloud-init.org b/ii/cloud-init.org index 642c0d0..db765d3 100644 --- a/ii/cloud-init.org +++ b/ii/cloud-init.org @@ -248,6 +248,5 @@ tmate -S /tmp/kind.kind-ci-box.iisocket new-session -A -s kind -n main \ # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # 
org-use-property-inheritance: t # End: diff --git a/ii/cluster/README.org b/ii/cluster/README.org new file mode 100644 index 0000000..ae3620e --- /dev/null +++ b/ii/cluster/README.org @@ -0,0 +1,3068 @@ +#+TITLE: ii cluster + +#+begin_quote +A cluster for things that ii uses both internally and externally +#+end_quote + +* Initialize the packet plugin for Cluster-API +Export credentials for Cluster-API + Packet running in kind to use +#+begin_src tmate :window cluster :session packet-cluster-api :noweb yes +read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID && \ +export PACKET_PROJECT_ID && \ +read -p 'PACKET_API_KEY: ' PACKET_API_KEY && \ +export PACKET_API_KEY && \ +clusterctl init --infrastructure=packet +#+end_src + +* Set up cluster +#+NAME: Cluster-API manifests +#+begin_src yaml :tangle ./gitlab-cluster-capi-template.yaml +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + postKubeadmCommands: + - | + cat <> /etc/network/interfaces + auto lo:0 + iface lo:0 inet static + address {{ .controlPlaneEndpoint }} + netmask 255.255.255.255 + EOF + - systemctl restart networking + - mkdir -p ~/.kube/ + - cp /etc/kubernetes/admin.conf ~/.kube/config + - 'kubectl create secret generic -n kube-system metal-cloud-config --from-literal=cloud-sa.json=''{"apiKey": "{{ .apiKey }}","projectID": "${PROJECT_ID}","loadbalancer":"metallb:///"}''' 
+ - kubectl apply -f https://github.com/equinix/cloud-provider-equinix-metal/releases/download/v3.2.2/deployment.yaml + - kubectl taint node --all node-role.kubernetes.io/master- + preKubeadmCommands: + - sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab + - swapoff -a + - mount -a + - | + cat < /etc/modules-load.d/containerd.conf + overlay + br_netfilter + EOF + - modprobe overlay + - modprobe br_netfilter + - | + cat < /etc/sysctl.d/99-kubernetes-cri.conf + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + net.bridge.bridge-nf-call-ip6tables = 1 + EOF + - sysctl --system + - apt-get -y update + - DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list + - apt-get update -y + - TRIMMED_KUBERNETES_VERSION=$(echo {{ .kubernetesVersion }} | sed 's/\./\\./g' | sed 's/^v//') + - RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=$${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1) + - apt-get install -y ca-certificates socat jq ebtables apt-transport-https cloud-utils prips containerd kubelet=$${RESOLVED_KUBERNETES_VERSION} kubeadm=$${RESOLVED_KUBERNETES_VERSION} kubectl=$${RESOLVED_KUBERNETES_VERSION} + - systemctl daemon-reload + - systemctl enable containerd + - systemctl start containerd + - ping -c 3 -q {{ .controlPlaneEndpoint }} && echo OK || ip addr add {{ .controlPlaneEndpoint }} dev lo +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + OS: "${NODE_OS:=ubuntu_18_04}" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: 
+ cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + projectID: "${PROJECT_ID}" + facility: "${FACILITY}" +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a +spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a +spec: + template: + spec: + OS: "${NODE_OS:=ubuntu_18_04}" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +kind: KubeadmConfigTemplate +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-worker-a" +spec: + template: + spec: + preKubeadmCommands: + - sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab + - swapoff -a + - mount -a + - | + cat < /etc/modules-load.d/containerd.conf + overlay + br_netfilter + EOF + - modprobe overlay 
+ - modprobe br_netfilter + - | + cat < /etc/sysctl.d/99-kubernetes-cri.conf + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + net.bridge.bridge-nf-call-ip6tables = 1 + EOF + - sysctl --system + - apt-get -y update + - DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list + - apt-get update -y + - TRIMMED_KUBERNETES_VERSION=$(echo {{ .kubernetesVersion }} | sed 's/\./\\./g' | sed 's/^v//') + - RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=$${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1) + - apt-get install -y ca-certificates socat jq ebtables apt-transport-https cloud-utils prips containerd kubelet=$${RESOLVED_KUBERNETES_VERSION} kubeadm=$${RESOLVED_KUBERNETES_VERSION} kubectl=$${RESOLVED_KUBERNETES_VERSION} + - systemctl daemon-reload + - systemctl enable containerd + - systemctl start containerd + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external +#+end_src + +#+NAME: Start a window +#+begin_src tmate :dir . :window cluster +#+end_src + +#+NAME: Generate cluster-api manifests +#+begin_src tmate :dir . :window cluster :noweb yes +export CLUSTER_NAME="ii-coop" +export FACILITY=sjc1 +export KUBERNETES_VERSION=v1.21.1 +export POD_CIDR=10.244.0.0/16 +export SERVICE_CIDR=10.96.0.0/12 +export NODE_OS=ubuntu_20_04 +export CONTROLPLANE_NODE_TYPE=m1.xlarge.x86 +export CONTROL_PLANE_MACHINE_COUNT=3 +export WORKER_NODE_TYPE=$CONTROLPLANE_NODE_TYPE +export WORKER_MACHINE_COUNT=0 +export SSH_KEY="" +export PROJECT_ID=$PACKET_PROJECT_ID +clusterctl config cluster "$CLUSTER_NAME" --from ./gitlab-cluster-capi-template.yaml -n "$CLUSTER_NAME" > "$CLUSTER_NAME"-cluster-capi.yaml +#+end_src + +#+NAME: Create box +#+begin_src tmate :dir . 
:window cluster +kubectl create ns ii-coop 2> /dev/null +kubectl -n ii-coop apply -f ./ii-coop-cluster-capi.yaml +#+end_src + +The cluster will now be brought up, we can see the progress by taking a look at the resources: +#+begin_src tmate +kubectl -n ii-coop get machines,clusters,packetmachines,packetclusters +#+end_src + +Note that the nodes in the cluster will remain not ready until the CNI is applied. + +#+NAME: Get Kubeconfig +#+begin_src tmate :dir . :window cluster +kubectl -n ii-coop get secret ii-coop-kubeconfig -o=jsonpath='{.data.value}' | base64 -d > ~/.kube/config-ii-coop +export KUBECONFIG=~/.kube/config-ii-coop +#+end_src + +#+NAME: Ensure all nodes are scheduable +#+begin_src tmate :dir . :window cluster +kubectl taint node --all node-role.kubernetes.io/master- +#+end_src + +* Export KUBECONFIG to ii-coop +#+begin_src tmate :window cluster +export KUBECONFIG=~/.kube/config-ii-coop +#+end_src + +* CNI + +#+NAME: Weave CNI +#+begin_src tmate :dir . :window cluster +curl -o weave-net.yaml -L "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.IPALLOC_RANGE=192.168.0.0/16" +kubectl apply -f ./weave-net.yaml +#+end_src + +* Helm-Operator +#+NAME: Helm-Operator +#+begin_src tmate :dir . :window cluster +curl -o ./helm-operator-crds.yaml -L https://raw.githubusercontent.com/fluxcd/helm-operator/1.4.0/deploy/crds.yaml + +helm repo add fluxcd https://charts.fluxcd.io +helm template helm-operator --create-namespace fluxcd/helm-operator \ + --version 1.4.0 \ + --namespace helm-operator \ + --set helm.versions=v3 > ./helm-operator.yaml +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl create namespace helm-operator +kubectl apply -f ./helm-operator-crds.yaml -f ./helm-operator.yaml +#+end_src + +* Rook + Ceph +#+begin_src tmate :dir . 
:window cluster +kubectl create ns rook-ceph +#+end_src + +#+begin_src shell :results silent +curl -s -L -o ./rook-ceph-common.yaml https://github.com/rook/rook/raw/v1.7.2/cluster/examples/kubernetes/ceph/common.yaml +curl -s -L -o ./rook-ceph-crds.yaml https://github.com/rook/rook/raw/v1.7.2/cluster/examples/kubernetes/ceph/crds.yaml +curl -s -L -o ./rook-ceph-operator.yaml https://github.com/rook/rook/raw/v1.7.2/cluster/examples/kubernetes/ceph/operator.yaml +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl apply -f ./rook-ceph-crds.yaml -f ./rook-ceph-common.yaml -f ./rook-ceph-operator.yaml +#+end_src + +#+begin_src yaml :tangle ./rook-ceph-cluster.yaml +################################################################################################################# +# Define the settings for the rook-ceph cluster with common settings for a production cluster. +# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required +# in this example. See the documentation for more details on storage settings available. + +# For example, to create the cluster: +# kubectl create -f crds.yaml -f common.yaml -f operator.yaml +# kubectl create -f cluster.yaml +################################################################################################################# + +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: rook-ceph # namespace:cluster +spec: + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v13 is mimic, v14 is nautilus, and v15 is octopus. + # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. 
+ # If you want to be more precise, you can always use a timestamp tag such ceph/ceph:v15.2.8-20201217 + # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities + image: ceph/ceph:v16.2.5 + # Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported. + # Future versions such as `pacific` would require this to be set to `true`. + # Do not set to true in production. + allowUnsupported: false + # The path on the host where configuration files will be persisted. Must be specified. + # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. + dataDirHostPath: /var/lib/rook + # Whether or not upgrade should continue even if a check fails + # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise + # Use at your OWN risk + # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades + skipUpgradeChecks: false + # Whether or not continue if PGs are not clean during an upgrade + continueUpgradeAfterChecksEvenIfNotHealthy: false + mon: + # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. + count: 3 + # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. + # Mons should only be allowed on the same node for test environments where data loss is acceptable. + allowMultiplePerNode: false + mgr: + modules: + # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules + # are already enabled by other settings in the cluster CR. 
+ - name: pg_autoscaler + enabled: true + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # serve the dashboard using SSL + ssl: true + # enable prometheus alerting for cluster + monitoring: + # requires Prometheus to be pre-installed + enabled: false + # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used. + # Recommended: + # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty. + # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus + # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions. + rulesNamespace: rook-ceph + network: + # enable host networking + #provider: host + # EXPERIMENTAL: enable the Multus network provider + #provider: multus + #selectors: + # The selector keys are required to be `public` and `cluster`. + # Based on the configuration, the operator will do the following: + # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface + # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' + # + # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus + # + #public: public-conf --> NetworkAttachmentDefinition object name in Multus + #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. 
Empty string would mean IPv4 + #ipFamily: "IPv6" + # enable the crash collector for ceph daemon crash collection + crashCollector: + disable: false + # enable log collector, daemons will log on files and rotate + # logCollector: + # enabled: true + # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. + # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. + cleanupPolicy: + # Since cluster cleanup is destructive to data, confirmation is required. + # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". + # This value should only be set when the cluster is about to be deleted. After the confirmation is set, + # Rook will immediately stop configuring the cluster and only wait for the delete command. + # If the empty string is set, Rook will not destroy any data on hosts during uninstall. + confirmation: "" + # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion + sanitizeDisks: + # method indicates if the entire disk should be sanitized or simply ceph's metadata + # in both case, re-install is possible + # possible choices are 'complete' or 'quick' (default) + method: quick + # dataSource indicate where to get random bytes from to write on the disk + # possible choices are 'zero' (default) or 'random' + # using random sources will consume entropy from the system and will take much more time then the zero source + dataSource: zero + # iteration overwrite N times instead of the default (1) + # takes an integer value + iteration: 1 + # allowUninstallWithVolumes defines how the uninstall should be performed + # If set to true, cephCluster deletion does not wait for the PVs to be deleted. + allowUninstallWithVolumes: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. 
+ # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. +# placement: +# all: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: role +# operator: In +# values: +# - storage-node +# podAffinity: +# podAntiAffinity: +# topologySpreadConstraints: +# tolerations: +# - key: storage-node +# operator: Exists +# The above placement information can also be specified for mon, osd, and mgr components +# mon: +# Monitor deployments may contain an anti-affinity rule for avoiding monitor +# collocation on the same node. This is a required rule when host network is used +# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a +# preferred rule with weight: 50. +# osd: +# mgr: +# cleanup: + annotations: +# all: +# mon: +# osd: +# cleanup: +# prepareosd: +# If no mgr annotations are set, prometheus scrape annotations will be set by default. +# mgr: + labels: +# all: +# mon: +# osd: +# cleanup: +# mgr: +# prepareosd: + resources: +# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory +# mgr: +# limits: +# cpu: "500m" +# memory: "1024Mi" +# requests: +# cpu: "500m" +# memory: "1024Mi" +# The above example requests/limits can also be added to the mon and osd components +# mon: +# osd: +# prepareosd: +# crashcollector: +# logcollector: +# cleanup: + # The option to automatically remove OSDs that are out and are safe to destroy. 
+ removeOSDsIfOutAndSafeToRemove: false +# priorityClassNames: +# all: rook-ceph-default-priority-class +# mon: rook-ceph-mon-priority-class +# osd: rook-ceph-osd-priority-class +# mgr: rook-ceph-mgr-priority-class + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: false + deviceFilter: "^sd[c-f]" + # config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. + # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" +# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named +# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. +# nodes: +# - name: "172.17.4.201" +# devices: # specific devices to use for storage can be specified for each node +# - name: "sdb" +# - name: "nvme01" # multiple osds can be created on high performance devices +# config: +# osdsPerDevice: "5" +# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths +# config: # configuration can be specified at the node level which overrides the cluster level config +# storeType: filestore +# - name: "172.17.4.301" +# deviceFilter: "^sd." + # The section for configuring management of daemon disruptions during upgrade or fencing. + disruptionManagement: + # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. 
OSD PDBs are managed dynamically + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # block eviction of OSDs by default and unblock them safely when drains are detected. + managePodBudgets: false + # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the + # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. + osdMaintenanceTimeout: 30 + # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. + # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. + # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. + pgHealthCheckTimeout: 0 + # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. + # Only available on OpenShift. + manageMachineDisruptionBudgets: false + # Namespace in which to watch for the MachineDisruptionBudgets. + machineDisruptionBudgetNamespace: openshift-machine-api + + # healthChecks + # Valid values for daemons are 'mon', 'osd', 'status' + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe, it works for all mon,mgr,osd daemons + livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false +#+end_src + +#+begin_src tmate :dir . 
:window cluster +kubectl apply -f ./rook-ceph-cluster.yaml +#+end_src + +#+begin_src yaml :tangle ./rook-ceph-pool-storageclass.yaml +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: replicapool + namespace: rook-ceph +spec: + failureDomain: host + replicated: + size: 3 +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-ceph-block + annotations: + storageclass.kubernetes.io/is-default-class: "true" +# Change "rook-ceph" provisioner prefix to match the operator namespace if needed +provisioner: rook-ceph.rbd.csi.ceph.com +parameters: + # clusterID is the namespace where the rook cluster is running + clusterID: rook-ceph + # Ceph pool into which the RBD image shall be created + pool: replicapool + + # (optional) mapOptions is a comma-separated list of map options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # mapOptions: lock_on_read,queue_depth=1024 + + # (optional) unmapOptions is a comma-separated list of unmap options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # unmapOptions: force + + # RBD image format. Defaults to "2". + imageFormat: "2" + + # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. + imageFeatures: layering + + # The secrets contain Ceph admin credentials. 
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. + csi.storage.k8s.io/fstype: ext4 + +# Delete the rbd volume when a PVC is deleted +reclaimPolicy: Delete +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl apply -f ./rook-ceph-pool-storageclass.yaml +#+end_src + +#+begin_src yaml :tangle ./rook-ceph-pvc-test.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rook-ceph-pvc-test +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-block +--- +apiVersion: v1 +kind: Pod +metadata: + name: rook-ceph-pvc-test +spec: + nodeName: ii-coop-control-plane-nbvt9 + containers: + - name: rook-ceph-pvc-test + image: alpine:3.12 + command: + - sleep + - infinity + volumeMounts: + - name: rook-ceph-pvc-test + mountPath: /mnt + volumes: + - name: rook-ceph-pvc-test + persistentVolumeClaim: + claimName: rook-ceph-pvc-test +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl -n default apply -f ./rook-ceph-pvc-test.yaml +#+end_src + +#+begin_src tmate :dir . 
:window cluster +kubectl -n default exec -it rook-ceph-pvc-test -- sh +#+end_src + +#+NAME: RWM storageClass +#+begin_src yaml :tangle ./rook-ceph-shared-pool-storageclass.yaml +apiVersion: ceph.rook.io/v1 +kind: CephFilesystem +metadata: + name: rook-ceph-shared + namespace: rook-ceph +spec: + metadataPool: + replicated: + size: 3 + dataPools: + - replicated: + size: 3 + preservePoolsOnDelete: true + metadataServer: + activeCount: 1 + activeStandby: true +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-ceph-shared +# Change "rook-ceph" provisioner prefix to match the operator namespace if needed +provisioner: rook-ceph.cephfs.csi.ceph.com +parameters: + # clusterID is the namespace where operator is deployed. + clusterID: rook-ceph + + # CephFS filesystem name into which the volume shall be created + fsName: rook-ceph-shared + + # Ceph pool into which the volume shall be created + # Required for provisionVolume: "true" + pool: rook-ceph-shared-data0 + + # Root path of an existing CephFS volume + # Required for provisionVolume: "false" + # rootPath: /absolute/path + + # The secrets contain Ceph admin credentials. These are generated automatically by the operator + # in the same namespace as the cluster. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + +reclaimPolicy: Delete +#+end_src + +#+begin_src tmate :dir . 
:window cluster +kubectl apply -f ./rook-ceph-shared-pool-storageclass.yaml +#+end_src + +#+begin_src yaml :tangle ./rook-ceph-pvc-shared-test.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rook-ceph-pvc-shared-test +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-shared +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl -n default apply -f ./rook-ceph-pvc-shared-test.yaml +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl -n default describe pvc rook-ceph-pvc-shared-test +#+end_src + +#+begin_src yaml :tangle ./rook-ceph-dashboard.yaml +apiVersion: v1 +kind: Service +metadata: + name: rook-ceph-mgr-dashboard-external-https + namespace: rook-ceph + labels: + app: rook-ceph-mgr + rook_cluster: rook-ceph +spec: + ports: + - name: dashboard + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + app: rook-ceph-mgr + rook_cluster: rook-ceph + sessionAffinity: None + type: NodePort +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl apply -f ./rook-ceph-dashboard.yaml +#+end_src + +#+begin_src tmate :dir . 
:window cluster +echo https://$(kubectl get node -o wide $(kubectl -n rook-ceph get pod -o wide | grep mgr | awk '{print $7}') | awk '{print $7}' | tail -1):$(kubectl -n rook-ceph get svc rook-ceph-mgr-dashboard-external-https -o=jsonpath='{.spec.ports[0].nodePort}') +echo admin :: $(kubectl -n rook-ceph get secrets rook-ceph-dashboard-password -o=jsonpath='{.data.password}' | base64 -d ; echo) +#+end_src + +*** Debug +#+begin_src yaml :tangle ./rook-ceph-toolbox.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: rook-ceph-tools + namespace: rook-ceph + labels: + app: rook-ceph-tools +spec: + selector: + matchLabels: + app: rook-ceph-tools + template: + metadata: + labels: + app: rook-ceph-tools + spec: + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: rook-ceph-tools + image: rook/ceph:v1.7.2 + command: ["/tini"] + args: ["-g", "--", "/usr/local/bin/toolbox.sh"] + imagePullPolicy: IfNotPresent + env: + - name: ROOK_CEPH_USERNAME + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: ceph-username + - name: ROOK_CEPH_SECRET + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: ceph-secret + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - name: mon-endpoint-volume + mountPath: /etc/rook + volumes: + - name: mon-endpoint-volume + configMap: + name: rook-ceph-mon-endpoints + items: + - key: data + path: mon-endpoints + - name: ceph-config + emptyDir: {} + tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 5 +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl apply -f ./rook-ceph-toolbox.yaml +#+end_src + +#+begin_src tmate :dir . :window cluster +kubectl -n rook-ceph exec -it daemonset/rook-ceph-tools -- bash +#+end_src + +* Set up cluster apps + +#+NAME: Get LoadBalancer IP +#+begin_src tmate :dir . 
:window cluster +export LOAD_BALANCER_IP=$(kubectl -n kube-system get cm kubeadm-config -o=jsonpath='{.data.ClusterConfiguration}' | yq e '.controlPlaneEndpoint' -P - | cut -d ':' -f1) +#+end_src + +#+begin_src shell :results silent +curl -s -o postgres-operator-crd.yaml -L https://github.com/zalando/postgres-operator/raw/v1.7.0/manifests/postgresql.crd.yaml +curl -s -L https://github.com/zalando/postgres-operator/raw/v1.7.0/manifests/operator-service-account-rbac.yaml | sed 's/namespace: default/namespace: postgres-operator/g' > ./postgres-operator-serviceaccount.yaml +curl -s -o postgres-operator-apiservice.yaml -L https://github.com/zalando/postgres-operator/raw/v1.7.0/manifests/api-service.yaml +curl -s -o postgres-operator.yaml -L https://github.com/zalando/postgres-operator/raw/v1.7.0/manifests/postgres-operator.yaml +#+end_src + +#+NAME: Postgres operator +#+begin_src yaml :tangle ./postgres-operator-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-operator +data: + # additional_pod_capabilities: "SYS_NICE" + # additional_secret_mount: "some-secret-name" + # additional_secret_mount_path: "/some/dir" + api_port: "8080" + aws_region: eu-central-1 + cluster_domain: cluster.local + cluster_history_entries: "1000" + cluster_labels: application:spilo + cluster_name_label: cluster-name + # connection_pooler_default_cpu_limit: "1" + # connection_pooler_default_cpu_request: "500m" + # connection_pooler_default_memory_limit: 100Mi + # connection_pooler_default_memory_request: 100Mi + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-18" + # connection_pooler_max_db_connections: 60 + # connection_pooler_mode: "transaction" + # connection_pooler_number_of_instances: 2 + # connection_pooler_schema: "pooler" + # connection_pooler_user: "pooler" + # custom_service_annotations: "keyx:valuez,keya:valuea" + # custom_pod_annotations: "keya:valuea,keyb:valueb" + db_hosted_zone: db.example.com + debug_logging: "true" + # 
default_cpu_limit: "1" + # default_cpu_request: 100m + # default_memory_limit: 500Mi + # default_memory_request: 100Mi + # delete_annotation_date_key: delete-date + # delete_annotation_name_key: delete-clustername + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1 + # downscaler_annotations: "deployment-time,downscaler/*" + # enable_admin_role_for_users: "true" + # enable_crd_validation: "true" + # enable_cross_namespace_secret: "false" + # enable_database_access: "true" + enable_ebs_gp3_migration: "false" + # enable_ebs_gp3_migration_max_size: "1000" + # enable_init_containers: "true" + # enable_lazy_spilo_upgrade: "false" + enable_master_load_balancer: "false" + enable_pgversion_env_var: "true" + enable_pod_antiaffinity: "true" + enable_pod_disruption_budget: "true" + # enable_postgres_team_crd: "false" + # enable_postgres_team_crd_superusers: "false" + enable_replica_load_balancer: "false" + # enable_shm_volume: "true" + # enable_sidecars: "true" + enable_spilo_wal_path_compat: "true" + enable_team_member_deprecation: "false" + # enable_team_superuser: "false" + enable_teams_api: "false" + # etcd_host: "" + external_traffic_policy: "Cluster" + # gcp_credentials: "" + # kubernetes_use_configmaps: "false" + # infrastructure_roles_secret_name: "postgresql-infrastructure-roles" + # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole" + # inherited_annotations: owned-by + # inherited_labels: application,environment + # kube_iam_role: "" + # log_s3_bucket: "" + logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0" + # logical_backup_google_application_credentials: "" + logical_backup_job_prefix: "logical-backup-" + logical_backup_provider: "s3" + # logical_backup_s3_access_key_id: "" + logical_backup_s3_bucket: "my-bucket-url" + # logical_backup_s3_region: "" + # logical_backup_s3_endpoint: "" + # logical_backup_s3_secret_access_key: "" + logical_backup_s3_sse: 
"AES256" + logical_backup_schedule: "30 00 * * *" + major_version_upgrade_mode: "manual" + master_dns_name_format: "{cluster}.{team}.{hostedzone}" + # master_pod_move_timeout: 20m + # max_instances: "-1" + # min_instances: "-1" + # min_cpu_limit: 250m + # min_memory_limit: 250Mi + # minimal_major_version: "9.5" + # node_readiness_label: "" + # oauth_token_secret_name: postgresql-operator + # pam_configuration: | + # https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees + # pam_role_name: zalandos + pdb_name_format: "postgres-{cluster}-pdb" + pod_antiaffinity_topology_key: "kubernetes.io/hostname" + pod_deletion_wait_timeout: 10m + # pod_environment_configmap: "default/my-custom-config" + # pod_environment_secret: "my-custom-secret" + pod_label_wait_timeout: 10m + pod_management_policy: "ordered_ready" + # pod_priority_class_name: "postgres-pod-priority" + pod_role_label: spilo-role + # pod_service_account_definition: "" + pod_service_account_name: "postgres-pod" + # pod_service_account_role_binding_definition: "" + pod_terminate_grace_period: 5m + # postgres_superuser_teams: "postgres_superusers" + # protected_role_names: "admin" + ready_wait_interval: 3s + ready_wait_timeout: 30s + repair_period: 5m + replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" + replication_username: standby + resource_check_interval: 3s + resource_check_timeout: 10m + resync_period: 30m + ring_log_lines: "100" + role_deletion_suffix: "_deleted" + secret_name_template: "{username}.{cluster}.credentials" + # sidecar_docker_images: "" + # set_memory_request_to_limit: "false" + spilo_allow_privilege_escalation: "true" + # spilo_runasuser: 101 + # spilo_runasgroup: 103 + # spilo_fsgroup: 103 + spilo_privileged: "false" + storage_resize_mode: "pvc" + super_username: postgres + # target_major_version: "13" + # team_admin_role: "admin" + # team_api_role_configuration: "log_statement:all" + # teams_api_url: http://fake-teams-api.default.svc.cluster.local + # 
toleration: "" + # wal_az_storage_account: "" + # wal_gs_bucket: "" + # wal_s3_bucket: "" + watched_namespace: "*" # listen to all namespaces + workers: "8" +#+end_src + +#+NAME: Install Postgres-Operator +#+begin_src tmate :dir . :window cluster +kubectl create ns postgres-operator 2> /dev/null +kubectl -n postgres-operator apply \ + -f ./postgres-operator-crd.yaml \ + -f ./postgres-operator-serviceaccount.yaml \ + -f ./postgres-operator-configmap.yaml \ + -f ./postgres-operator-apiservice.yaml \ + -f ./postgres-operator.yaml +kubectl -n postgres-operator wait pod --for=condition=Ready --selector=app.kubernetes.io/name=postgres-operator --timeout=200s +#+end_src + +#+NAME: Cert-Manager +#+begin_src tmate :dir . :window cluster +curl -O -L https://github.com/jetstack/cert-manager/releases/download/v1.1.0/cert-manager.yaml +kubectl apply -f ./cert-manager.yaml +#+end_src + +#+NAME: MetalLB +#+begin_src tmate :dir . :window cluster +kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e "s/strictARP: false/strictARP: true/" | kubectl apply -f - -n kube-system +curl -o metallb-namespace.yaml -L https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml +curl -O -L https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml +kubectl apply -f ./metallb-namespace.yaml +kubectl apply -f ./metallb.yaml +kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +#+end_src + +#+NAME: Metrics-Server +#+begin_src yaml :tangle ./metrics-server.yaml +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: metrics-server + namespace: kube-system +spec: + releaseName: metrics-server + chart: + repository: https://olemarkus.github.io/metrics-server + name: metrics-server + version: 2.11.2 + values: + args: + - --logtostderr + - --kubelet-preferred-address-types=InternalIP + - --kubelet-insecure-tls +#+end_src + +#+NAME: install metrics-server +#+begin_src tmate 
:dir . :window cluster + kubectl apply -f ./metrics-server.yaml +#+end_src + +#+NAME: nginx-ingress +#+begin_src yaml :tangle ./nginx-ingress.yaml +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: nginx-ingress + namespace: nginx-ingress +spec: + releaseName: nginx-ingress + chart: + repository: https://kubernetes.github.io/ingress-nginx + name: ingress-nginx + version: 4.0.3 + values: + controller: + ingressClassResource: + default: true + service: + externalTrafficPolicy: Local + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + publishService: + enabled: true + autoscaling: + enabled: true + minReplicas: 3 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + minAvailable: 3 + metrics: + enabled: true + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - ingress-nginx + topologyKey: "kubernetes.io/hostname" +#+end_src + +#+NAME: install nginx-ingress +#+begin_src tmate :dir . :window cluster +kubectl create ns nginx-ingress +kubectl -n nginx-ingress apply -f ./nginx-ingress.yaml +#+end_src + +Due to issues with Equinix Metal [CCM] + BGP IP addressing issues, the IP of the first node is used as the Ingress IP +#+begin_src tmate :dir . 
:window cluster +export K8S_NODE_IP=$(kubectl get nodes -o=jsonpath='{.items[0].status.addresses[1].address}') +kubectl -n nginx-ingress patch svc nginx-ingress-ingress-nginx-controller --type='json' -p="[{\"op\": \"replace\", \"path\": \"/spec/externalIPs/0\", \"value\":\"${K8S_NODE_IP}\"}]" +#+end_src + +#+NAME: External-DNS manifests +#+begin_src yaml :tangle ./external-dns.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: external-dns +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: external-dns +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - watch + - list +- apiGroups: + - externaldns.k8s.io + resources: + - dnsendpoints + verbs: + - get + - watch + - list +- apiGroups: + - externaldns.k8s.io + resources: + - dnsendpoints/status + verbs: + - get + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: external-dns-viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-dns +subjects: +- kind: ServiceAccount + name: external-dns + namespace: external-dns +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: external-dns +spec: + strategy: + type: Recreate + selector: + matchLabels: + app: external-dns + template: + metadata: + labels: + app: external-dns + spec: + serviceAccountName: external-dns + containers: + - name: external-dns + image: k8s.gcr.io/external-dns/external-dns:v0.7.4 + args: + - --source=crd + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --crd-source-kind=DNSEndpoint + - --provider=pdns + - --policy=sync + - --registry=txt + - --interval=10s + - --log-level=debug + env: + - name: EXTERNAL_DNS_TXT_OWNER_ID + valueFrom: + secretKeyRef: + name: external-dns-pdns + key: txt-owner-id + - name: 
EXTERNAL_DNS_PDNS_SERVER + valueFrom: + secretKeyRef: + name: external-dns-pdns + key: pdns-server + - name: EXTERNAL_DNS_PDNS_API_KEY + valueFrom: + secretKeyRef: + name: external-dns-pdns + key: pdns-api-key + - name: EXTERNAL_DNS_PDNS_TLS_ENABLED + value: "0" +#+end_src + +#+NAME: External-DNS +#+begin_src tmate :dir . :window cluster + kubectl create ns external-dns + curl -o external-dns-crd.yaml -L https://raw.githubusercontent.com/kubernetes-sigs/external-dns/master/docs/contributing/crd-source/crd-manifest.yaml + kubectl apply -f ./external-dns-crd.yaml + kubectl -n external-dns create secret generic external-dns-pdns \ + --from-literal=txt-owner-id=gitlab \ + --from-literal=pdns-server=http://powerdns-service-api.powerdns:8081 \ + --from-literal=pdns-api-key=pairingissharing + kubectl -n external-dns apply -f ./external-dns.yaml +#+end_src + +#+NAME: PowerDNS +#+begin_src yaml :tangle powerdns.yaml +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: powerdns +spec: + releaseName: powerdns + chart: + git: https://github.com/sharingio/helm-charts + ref: master + path: charts/powerdns + values: + domain: gitlab-staging.ii.coop + default_soa_name: gitlab-staging.ii.coop + apikey: pairingissharing + powerdns: + default_ttl: 3600 + soa_minimum_ttl: 3600 + domain: gitlab-staging.ii.coop + default_soa_name: gitlab-staging.ii.coop + mysql_host: powerdns-service-db + mysql_user: powerdns + extraEnv: + - name: PDNS_dnsupdate + value: "yes" + - name: PDNS_allow_dnsupdate_from + value: "192.168.0.0/24" + service: + dns: + tcp: + enabled: true + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + externalIPs: + - ${LOAD_BALANCER_IP} + udp: + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + externalIPs: + - ${LOAD_BALANCER_IP} + mariadb: + mysql_pass: pairingissharing + mysql_rootpass: pairingissharing + admin: + enabled: false + ingress: + enabled: false + secret: pairingissharing +#+end_src + +#+NAME: install PowerDNS 
+#+begin_src tmate :dir . :window cluster + kubectl create ns powerdns + envsubst < ./powerdns.yaml | kubectl -n powerdns apply -f - +#+end_src + +#+NAME: PowerDNS configure +#+begin_src tmate :dir . :window cluster + kubectl -n powerdns wait pod --for=condition=Ready --selector=app.kubernetes.io/name=powerdns --timeout=200s + until [ "$(dig A ns1.gitlab-staging.ii.coop +short)" = "${LOAD_BALANCER_IP}" ]; do + echo "BaseDNSName does not resolve to Instance IP yet" + sleep 1 + done + kubectl -n powerdns exec deployment/powerdns -- pdnsutil generate-tsig-key pair hmac-md5 + kubectl -n powerdns exec deployment/powerdns -- pdnsutil activate-tsig-key gitlab-staging.ii.coop pair master + kubectl -n powerdns exec deployment/powerdns -- pdnsutil set-meta gitlab-staging.ii.coop TSIG-ALLOW-DNSUPDATE pair + kubectl -n powerdns exec deployment/powerdns -- pdnsutil set-meta gitlab-staging.ii.coop NOTIFY-DNSUPDATE 1 + kubectl -n powerdns exec deployment/powerdns -- pdnsutil set-meta gitlab-staging.ii.coop SOA-EDIT-DNSUPDATE EPOCH + export POWERDNS_TSIG_SECRET="$(kubectl -n powerdns exec deployment/powerdns -- pdnsutil list-tsig-keys | grep pair | awk '{print $3}' | tr -d '\n')" + nsupdate < 'AWS', + 'region' => 'us-east-1', + 'aws_access_key_id' => '', + 'aws_secret_access_key' => '', + 'host' => 'minio.gitlab-staging.ii.coop', + 'endpoint' => 'https://minio.gitlab-staging.ii.coop' +} + +gitlab_rails['uploads_object_store_enabled'] = true +gitlab_rails['uploads_object_store_remote_directory'] = "uploads" +gitlab_rails['uploads_object_store_connection'] = { + 'provider' => 'AWS', + 'region' => 'us-east-1', + 'aws_access_key_id' => '', + 'aws_secret_access_key' => '', + 'endpoint' => 'https://minio.gitlab-staging.ii.coop', + 'host' => 'minio.gitlab-staging.ii.coop', +} + +gitlab_rails['lfs_object_store_enabled'] = true +gitlab_rails['lfs_object_store_remote_directory'] = "lfs-objects" +gitlab_rails['lfs_object_store_connection'] = { + 'provider' => 'AWS', + 'region' => 
'us-east-1', + 'aws_access_key_id' => '', + 'aws_secret_access_key' => '', + 'endpoint' => 'https://minio.gitlab-staging.ii.coop', + 'host' => 'minio.gitlab-staging.ii.coop', + 'path_style' => true +} +#+end_src +filling in the =aws_access_key_id= and =aws_secret_access_key= fields with the MinIO access keys + +#+begin_src shell +# update the settings +gitlab-ctl reconfigure + +# migrate LFS to new instance +gitlab-rake gitlab:lfs:migrate + +# migrate all uploads to the new instance +gitlab-rake gitlab:uploads:migrate:all + +# create a backup of all the data +gitlab-rake gitlab:backup:create SKIP=artifacts,lfs,uploads +#+end_src + +Prepare a /gitlab-rails-secret/ based off of the =/etc/gitlab/gitlab-secrets.rb= file +#+begin_src tmate :window cluster +read -p 'db_key_base: ' db_key_base && export db_key_base && \ +read -p 'secret_key_base: ' secret_key_base && export secret_key_base && \ +read -p 'otp_key_base: ' otp_key_base && export otp_key_base && \ +read -p 'encrypted_settings_key_base: ' encrypted_settings_key_base && export encrypted_settings_key_base && \ +echo "Place the existing 'openid_connect_signing_key' contents in the field" && \ +echo "Place the existing 'ci_jwt_signing_key' contents in the field" && \ +cat << EOF > /tmp/gitlab-rails-secrets.yml +production: + db_key_base: $db_key_base + secret_key_base: $secret_key_base + otp_key_base: $otp_key_base + encrypted_settings_key_base: $encrypted_settings_key_base + ci_jwt_signing_key: | + + openid_connect_signing_key: | + +EOF +#+end_src +Edit //tmp/gitlab-rails-secrets.yml/ to include the remaining certificates + +Update the rails-secret +#+begin_src tmate :window cluster +kubectl -n gitlab create secret generic gitlab-rails-secret --from-file=secrets.yml=/tmp/gitlab-rails-secrets.yml -o yaml --dry-run=client \ + | kubectl apply -f - +#+end_src + +Get new Pods that know about the secret +#+begin_src tmate :window cluster +kubectl -n gitlab rollout restart $(kubectl -n gitlab get deployment 
-o=jsonpath='{range .items[*]}deployment/{.metadata.name} {end}' | grep -E 'sidekiq|webservice|task-runner') +#+end_src + +(on existing GitLab instance) +#+begin_src shell +docker-compose exec gitlab.ii.coop bash + +cd /var/opt/gitlab/backups/ + +# find the latest backup file +GITLAB_BACKUP_FILE=$(realpath $(ls /var/opt/gitlab/backups/ | grep $(date +%m_%d))) +cp $GITLAB_BACKUP_FILE /tmp/gitlab_backup.tar + +exit + +# copy out of the container +docker cp $(docker ps | grep gitlab.ii.coop | awk '{print $1}'):/tmp/gitlab_backup.tar /tmp/gitlab_backup.tar +#+end_src + +Copy the backup onto this host +#+begin_src tmate :window cluster +scp root@147.75.69.207:/tmp/gitlab_backup.tar /tmp/gitlab_backup.tar +#+end_src + +Copy the backup into the task-runner Pod +#+begin_src tmate :window cluster +export TASK_RUNNER_POD_NAME=$(kubectl -n gitlab get pods -l app=task-runner -o=jsonpath='{.items[0].metadata.name}') +kubectl -n gitlab cp -c task-runner /tmp/gitlab_backup.tar $(kubectl -n gitlab get pods -l app=task-runner -o=jsonpath='{.items[0].metadata.name}'):/tmp/gitlab_backup.tar +#+end_src + +Drop the /pg_stat_statements/ extension +#+begin_src tmate :window cluster +POSTGRES_PASSWORD="$(kubectl -n gitlab get secret gitlab.gitlab-db.credentials -o=jsonpath='{.data.password}' | base64 -d)" +kubectl -n gitlab exec -it deployment/gitlab-db-pooler -- psql "postgres://gitlab:$POSTGRES_PASSWORD@gitlab-db-pooler:5432/gitlab?sslmode=require" -c 'drop extension pg_stat_statements cascade;' +#+end_src + +Restore the backup +#+begin_src tmate :window cluster +kubectl -n gitlab exec -c task-runner -it $TASK_RUNNER_POD_NAME -- backup-utility --restore -f file:///tmp/gitlab_backup.tar +#+end_src + +Enable Kubernetes features +#+begin_src tmate :window cluster +kubectl -n gitlab exec -c task-runner -it $TASK_RUNNER_POD_NAME -- gitlab-rails runner -e production /scripts/custom-instance-setup +#+end_src + +Get new Pods, after restoring from the backup +#+begin_src tmate :window cluster 
+kubectl -n gitlab rollout restart $(kubectl -n gitlab get deployment -o=jsonpath='{range .items[*]}deployment/{.metadata.name} {end}' | tr ' ' '\n' | grep -E 'sidekiq|webservice') +#+end_src + +** Debug +Jump into the database +#+begin_src tmate :window cluster +POSTGRES_PASSWORD="$(kubectl -n gitlab get secret gitlab.gitlab-db.credentials -o=jsonpath='{.data.password}' | base64 -d)" +kubectl -n gitlab exec -it deployment/gitlab-db-pooler -- psql "postgres://gitlab:$POSTGRES_PASSWORD@gitlab-db-pooler:5432/gitlab?sslmode=require" +#+end_src + +View the rendered manifests +#+begin_src tmate :window cluster +helm repo add gitlab https://charts.gitlab.io/ +helm repo update +helm template -n gitlab gitlab gitlab/gitlab --version 5.1.5 -f <(helm get values -n gitlab gitlab -o yaml | cat 2> /dev/null) +#+end_src + +If Helm-Operator is not liking upgrading, very occasionally manual upgrading is needed +#+begin_src tmate :window cluster +helm upgrade --install -n gitlab gitlab gitlab/gitlab --version 5.0.0 -f <(helm get values -n gitlab gitlab -o yaml | cat 2> /dev/null) +#+end_src + +Ensure that background jobs have completed before continuing in a migration +#+begin_src tmate :window cluster +export TASK_RUNNER_POD_NAME=$(kubectl -n gitlab get pods -l app=task-runner -o=jsonpath='{.items[0].metadata.name}') +kubectl -n gitlab exec -c task-runner -it $TASK_RUNNER_POD_NAME -- gitlab-rake gitlab:background_migrations:finalize[CopyColumnUsingBackgroundMigrationJob,push_event_payloads,event_id,'[["event_id"]\, ["event_id_convert_to_bigint"]]'] +#+end_src + +** TODO +- ensure that the /gitlab-gitlab-shell/ Service in the /gitlab/ Namespace has the same LoadBalancerIP as the /nginx-ingress-ingress-nginx-controller/ in the /nginx-ingress/ Namespace + https://metallb.universe.tf/usage/ + +** References +- https://docs.gitlab.com/charts/installation/migration/package_to_helm.html +- https://docs.gitlab.com/charts/backup-restore/restore.html +- 
https://docs.gitlab.com/ee/administration/object_storage.html#connection-settings + +* Install Mattermost + +#+begin_src tmate :dir . :window cluster +kubectl create namespace mattermost -o yaml --dry-run=client | \ + kubectl apply -f - +#+end_src + +** Install Postgresql-HA +#+name: postgres-database +#+begin_src yaml :tangle ./mattermost-postgresql.yaml +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: mattermost-db +spec: + enableConnectionPooler: true + connectionPooler: + mode: session + resources: + requests: + cpu: 250m + memory: 100Mi + limits: + cpu: "1" + memory: 100Mi + teamId: "mattermost" + volume: + size: 50Gi + numberOfInstances: 3 + users: + mattermost: # database owner + - superuser + - createdb + databases: + mattermost: mattermost # dbname: owner + postgresql: + version: "13" +#+end_src + +#+name: install-postgres-database +#+begin_src tmate :dir . :window cluster +kubectl -n mattermost apply -f ./mattermost-postgresql.yaml +#+end_src + +** Install MinIO Operator +Create the namespace: +#+name: create-minio-namespace +#+begin_src tmate :dir . :window cluster +kubectl create ns minio-operator +#+end_src + +Download the latest manifests: +#+name: download-minio-operator-manifests +#+begin_src tmate :dir . :window cluster +curl -O -L https://raw.githubusercontent.com/mattermost/mattermost-operator/v1.14.0/docs/minio-operator/minio-operator.yaml +#+end_src + +Install the operator: +#+name: install-minio-operator +#+begin_src tmate :dir . :window cluster +kubectl -n minio-operator apply -f ./minio-operator.yaml +#+end_src + +** Install Mattermost Operator +Create the namespace: +#+name: create-mattermost-operator-namespace +#+begin_src tmate :dir . :window cluster +kubectl create ns mattermost-operator +#+end_src + +Download the latest manifests: +#+name: download-mattermost-operator-manifests +#+begin_src tmate :dir . 
:window cluster +curl -O -L https://raw.githubusercontent.com/mattermost/mattermost-operator/v1.14.0/docs/mattermost-operator/mattermost-operator.yaml +#+end_src + +Install the operator: +#+name: install-mattermost-operator +#+begin_src tmate :dir . :window cluster +kubectl apply -n mattermost-operator -f ./mattermost-operator.yaml +#+end_src + +** Install Mattermost +*** Add OAuth secret +#+begin_src tmate :window cluster +read -p 'MM_GITLABSETTINGS_ID: ' MM_GITLABSETTINGS_ID && export MM_GITLABSETTINGS_ID && \ +read -p 'MM_GITLABSETTINGS_SECRET: ' MM_GITLABSETTINGS_SECRET && export MM_GITLABSETTINGS_SECRET && \ +read -p 'MM_GITLABSETTINGS_SCOPE: ' MM_GITLABSETTINGS_SCOPE && export MM_GITLABSETTINGS_SCOPE && \ +read -p 'MM_GITLABSETTINGS_AUTHENDPOINT: ' MM_GITLABSETTINGS_AUTHENDPOINT && export MM_GITLABSETTINGS_AUTHENDPOINT && \ +read -p 'MM_GITLABSETTINGS_TOKENENDPOINT: ' MM_GITLABSETTINGS_TOKENENDPOINT && export MM_GITLABSETTINGS_TOKENENDPOINT && \ +read -p 'MM_GITLABSETTINGS_USERAPIENDPOINT: ' MM_GITLABSETTINGS_USERAPIENDPOINT && export MM_GITLABSETTINGS_USERAPIENDPOINT && \ +read -p 'MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL: ' MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL && export MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL && \ +read -p 'MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME: ' MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME && export MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME && \ +read -p 'MM_EMAILSETTINGS_FEEDBACKNAME: ' MM_EMAILSETTINGS_FEEDBACKNAME && export MM_EMAILSETTINGS_FEEDBACKNAME && \ +read -p 'MM_EMAILSETTINGS_SMTPUSERNAME: ' MM_EMAILSETTINGS_SMTPUSERNAME && export MM_EMAILSETTINGS_SMTPUSERNAME && \ +read -p 'MM_EMAILSETTINGS_SMTPPASSWORD: ' MM_EMAILSETTINGS_SMTPPASSWORD && export MM_EMAILSETTINGS_SMTPPASSWORD && \ +read -p 'MM_EMAILSETTINGS_SMTPSERVER: ' MM_EMAILSETTINGS_SMTPSERVER && export MM_EMAILSETTINGS_SMTPSERVER && \ +read -p 'MM_EMAILSETTINGS_SMTPPORT: ' MM_EMAILSETTINGS_SMTPPORT && export MM_EMAILSETTINGS_SMTPPORT && \ +read -p 
'MM_EMAILSETTINGS_CONNECTIONSECURITY: ' MM_EMAILSETTINGS_CONNECTIONSECURITY && export MM_EMAILSETTINGS_CONNECTIONSECURITY && \ +kubectl -n mattermost create secret generic mattermost-user-config \ + --from-literal=MM_GITLABSETTINGS_ID="$MM_GITLABSETTINGS_ID" \ + --from-literal=MM_GITLABSETTINGS_SECRET="$MM_GITLABSETTINGS_SECRET" \ + --from-literal=MM_GITLABSETTINGS_AUTHENDPOINT="$MM_GITLABSETTINGS_AUTHENDPOINT" \ + --from-literal=MM_GITLABSETTINGS_TOKENENDPOINT="$MM_GITLABSETTINGS_TOKENENDPOINT" \ + --from-literal=MM_GITLABSETTINGS_USERAPIENDPOINT="$MM_GITLABSETTINGS_USERAPIENDPOINT" \ + --from-literal=MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL="$MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL" \ + --from-literal=MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME="$MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME" \ + --from-literal=MM_EMAILSETTINGS_FEEDBACKNAME="$MM_EMAILSETTINGS_FEEDBACKNAME" \ + --from-literal=MM_EMAILSETTINGS_SMTPUSERNAME="$MM_EMAILSETTINGS_SMTPUSERNAME" \ + --from-literal=MM_EMAILSETTINGS_SMTPPASSWORD="$MM_EMAILSETTINGS_SMTPPASSWORD" \ + --from-literal=MM_EMAILSETTINGS_SMTPSERVER="$MM_EMAILSETTINGS_SMTPSERVER" \ + --from-literal=MM_EMAILSETTINGS_SMTPPORT="$MM_EMAILSETTINGS_SMTPPORT" \ + --from-literal=MM_EMAILSETTINGS_CONNECTIONSECURITY="$MM_EMAILSETTINGS_CONNECTIONSECURITY" \ + -o yaml --dry-run=client \ + | kubectl apply -f - +#+end_src + +*** Operator configuration +#+name: mattermost-cluster-definition +#+begin_src yaml :tangle ./mattermost.yaml +apiVersion: installation.mattermost.com/v1beta1 +kind: Mattermost +metadata: + name: mattermost # Name of your cluster as shown in Kubernetes. 
+ namespace: mattermost +spec: + database: + external: + secret: mattermost-database + elasticSearch: {} + fileStore: + operatorManaged: + replicas: 1 + resources: + requests: + cpu: 150m + memory: 512Mi + storageSize: 50Gi + image: mattermost/mattermost-enterprise-edition + imagePullPolicy: IfNotPresent + ingressAnnotations: + kubernetes.io/ingress.class: nginx + ingressName: mattermost.ii.coop + probes: + livenessProbe: {} + readinessProbe: {} + replicas: 1 + scheduling: + affinity: {} + resources: + limits: + cpu: "2" + memory: 4Gi + requests: + cpu: 150m + memory: 256Mi + useIngressTLS: true + version: "5.39" + mattermostEnv: + - name: MM_GITLABSETTINGS_ENABLE + value: "true" + - name: MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL + value: "true" + - name: MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME + value: "true" + - name: MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS + value: "true" + - name: MM_EMAILSETTINGS_REQUIREEMAILVERIFICATION + value: "false" + - name: MM_EMAILSETTINGS_FEEDBACKEMAIL + value: "mattermost@ii.coop" + - name: MM_EMAILSETTINGS_FEEDBACKORGANIZATION + value: "ii.coop" + - name: MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS + value: "true" + - name: MM_EMAILSETTINGS_SENDPUSHNOTIFICATIONS + value: "true" + - name: MM_EMAILSETTINGS_PUSHNOTIFICATIONSERVER + value: "https://push-test.mattermost.com" + - name: MM_EMAILSETTINGS_PUSHNOTIFICATIONCONTENTS + value: "full" + - name: MM_EMAILSETTINGS_ENABLESMTPAUTH + value: "true" + - name: MM_GITLABSETTINGS_ID + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_ID + - name: MM_GITLABSETTINGS_SECRET + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_SECRET + - name: MM_GITLABSETTINGS_AUTHENDPOINT + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_AUTHENDPOINT + - name: MM_GITLABSETTINGS_TOKENENDPOINT + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_TOKENENDPOINT + - name: 
MM_GITLABSETTINGS_USERAPIENDPOINT + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_USERAPIENDPOINT + - name: MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL + - name: MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME + - name: MM_EMAILSETTINGS_FEEDBACKNAME + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_FEEDBACKNAME + - name: MM_EMAILSETTINGS_SMTPUSERNAME + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_SMTPUSERNAME + - name: MM_EMAILSETTINGS_SMTPPASSWORD + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_SMTPPASSWORD + - name: MM_EMAILSETTINGS_SMTPSERVER + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_SMTPSERVER + - name: MM_EMAILSETTINGS_SMTPPORT + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_SMTPPORT + - name: MM_EMAILSETTINGS_CONNECTIONSECURITY + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_CONNECTIONSECURITY +#+end_src + +#+name: wait-for-mattermost-db-creds +#+begin_src tmate :dir . :window cluster +until kubectl -n mattermost get secret/mattermost.mattermost-db.credentials; do + sleep 5s +done +#+end_src + +#+name: create-mattermost-database-connection-string +#+begin_src tmate :dir . 
:window cluster +POSTGRES_PASSWORD="$(kubectl -n mattermost get secret mattermost.mattermost-db.credentials -o=jsonpath='{.data.password}' | base64 -d)" +kubectl -n mattermost create secret generic mattermost-database --from-literal=DB_CONNECTION_STRING="postgres://mattermost:$POSTGRES_PASSWORD@mattermost-db-pooler:5432/mattermost?sslmode=require" -o yaml --dry-run=client | \ + kubectl apply -f - +#+end_src + +#+name: install-mattermost-cluster +#+begin_src tmate :dir . :window cluster +envsubst < ./mattermost.yaml | kubectl -n mattermost apply -f - +#+end_src + +Backup CronJob for Mattermost, storing [Postgres, MinIO, Config] all to a s3 bucket each week at 8:45 on a Tuesday +#+begin_src yaml :tangle ./mattermost-backup.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mattermost-backup + namespace: mattermost +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: mattermost-backup + namespace: mattermost +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +- apiGroups: + - apps + resources: + - deployments + verbs: + - get + - list +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: mattermost-backup + namespace: mattermost +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: mattermost-backup +subjects: +- kind: ServiceAccount + name: mattermost-backup + namespace: mattermost +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: mattermost-backup + namespace: mattermost +spec: + jobTemplate: + metadata: + name: mattermost-backup + spec: + template: + metadata: + labels: + app: mattermost-backup + spec: + serviceAccountName: mattermost-backup + volumes: + - name: tmp + emptyDir: {} + initContainers: + - name: get-date + image: alpine:3.15 + command: + - sh + - -x + - -c + - date +%Y%m%d%H%M | tee /tmp/date.txt + volumeMounts: + - name: tmp + mountPath: /tmp + - name: dump-config + 
image: alpine:3.15 + command: + - sh + - -x + - -c + - | + DATE="$(cat /tmp/date.txt)" && \ + apk add --no-cache curl && \ + curl -L -o /usr/local/bin/kubectl https://dl.k8s.io/v1.23.3/bin/linux/amd64/kubectl && \ + chmod +x /usr/local/bin/kubectl && \ + kubectl -n mattermost exec -it -c mattermost deployment/mattermost -- cat /mattermost/config/config.json > /tmp/mattermost-config-${DATE}.json && \ + cd /tmp && \ + tar cvf /tmp/mattermost-config-${DATE}.json.tar.gz /tmp/mattermost-config-${DATE}.json + volumeMounts: + - name: tmp + mountPath: /tmp + - name: dump-database + image: postgres:13.6-alpine + envFrom: + - secretRef: + name: mattermost-database + command: + - sh + - -x + - -c + - | + DATE="$(cat /tmp/date.txt)" && \ + pg_dump "${DB_CONNECTION_STRING}" -f /tmp/mattermost-db-${DATE}.sql && \ + cd /tmp && \ + tar cvf ./mattermost-db-${DATE}.sql.tar.gz /tmp/mattermost-db-${DATE}.sql + volumeMounts: + - name: tmp + mountPath: /tmp + - name: dump-minio + image: minio/mc:RELEASE.2022-02-13T23-26-13Z + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: mattermost-minio + key: accesskey + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: mattermost-minio + key: secretkey + - name: MINIO_HOST + value: mattermost-minio-hl-svc.mattermost:9000 + - name: MINIO_BUCKET + value: mattermost + command: + - sh + - -x + - -c + - | + DATE="$(cat /tmp/date.txt)" && \ + microdnf install tar && \ + mc config host add mattermostminio http://mattermost-minio-hl-svc.mattermost:9000 ${MINIO_ACCESS_KEY} ${MINIO_SECRET_KEY} && \ + mc cp --recursive mattermostminio/mattermost /tmp/mattermost-minio-${DATE}/ && \ + cd /tmp/mattermost-minio-${DATE}/ && \ + tar cvf ../mattermost-minio-${DATE}.tar.gz . 
+ volumeMounts: + - name: tmp + mountPath: /tmp + containers: + - name: mattermost-backup + image: amazon/aws-cli:2.4.18 + envFrom: + - secretRef: + name: aws-serviceaccount-secret + env: + - name: S3_BUCKET + value: ii-nz + command: + - sh + - -x + - -c + - | + DATE="$(cat /tmp/date.txt)" && \ + aws configure set aws_access_key_id "${AWS_ACCESS_KEY}" && \ + aws configure set aws_secret_access_key "${AWS_SECRET_KEY}" && \ + aws configure set default.region ap-southeast-2 && \ + cd /tmp && \ + for FILE in mattermost*.tar.gz; do + aws s3 cp "${FILE}" "s3://${S3_BUCKET}/mattermost-backup/${DATE}/${FILE}" + done + # - sleep 100000 + volumeMounts: + - name: tmp + mountPath: /tmp + restartPolicy: OnFailure + schedule: 45 8 * * 2 + # schedule: "*/30 * * * *" +#+end_src + +TODO: +- add IAM account with Bucket access role +- test the CronJob + +Apply the CronJob +#+begin_src tmate :window cluster +kubectl apply -f ./mattermost-backup.yaml +#+end_src + +Watch the mattermost-backup pod +#+begin_src tmate :window cluster +kubectl -n mattermost get pods -l app=mattermost-backup -w +#+end_src + +Get logs +#+begin_src tmate :window cluster +kubectl -n mattermost logs -l app=mattermost-backup --tail=10 -f +#+end_src + +** Certs +#+begin_src yaml :tangle ./certs-mattermost.yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod-mattermost + namespace: mattermost +spec: + secretName: mattermost-ii-coop-tls-cert + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + group: cert-manager.io + commonName: mattermost.ii.coop + dnsNames: + - mattermost.ii.coop +#+end_src + +#+begin_src tmate :dir . 
:window cluster +envsubst < ./certs-mattermost.yaml | kubectl apply -f - +#+end_src + +** Migrating Mattermost +#+begin_src shell +# exec into the omnibus container +docker-compose exec gitlab.ii.coop bash +# become the gitlab-psql user +su gitlab-psql -c bash +# run the backup command +/opt/gitlab/embedded/bin/pg_dump -U gitlab-psql -h /var/opt/gitlab/postgresql mattermost_production -w | gzip > /tmp/mattermost_dbdump_$(date --rfc-3339=date).sql.gz +# return to the root user +exit +# include the mattermost data +tar -zcvf /tmp/mattermost_data_$(date --rfc-3339=date).gz -C /var/opt/gitlab/mattermost data config.json +exit +# copy the backup out +docker cp $(docker ps | grep gitlab.ii.coop | awk '{print $1}'):/tmp/mattermost_data_$(date --rfc-3339=date).gz /tmp/mattermost_data.gz +docker cp $(docker ps | grep gitlab.ii.coop | awk '{print $1}'):/tmp/mattermost_dbdump_$(date --rfc-3339=date).sql.gz /tmp/mattermost_dbdump.sql.gz +#+end_src + +Copy the database +#+begin_src tmate :window mmm +scp root@147.75.69.207:/tmp/mattermost_dbdump.sql.gz /tmp/mattermost_dbdump.sql.gz +#+end_src + +Copy the data +#+begin_src tmate :window mmm +scp root@147.75.69.207:/tmp/mattermost_data.gz /tmp/mattermost_data.gz +#+end_src + +Port-forward the remote database +#+begin_src tmate :window mmm +export KUBECONFIG=~/.kube/config-ii-coop +kubectl -n mattermost port-forward svc/mattermost-db-pooler 5432 +#+end_src + +Restore the database +#+begin_src tmate :window cluster +zcat /tmp/mattermost_dbdump.sql.gz | psql $(kubectl -n mattermost get secret mattermost-database -o=jsonpath='{.data.DB_CONNECTION_STRING}' | base64 -d | sed 's/mattermost-db-pooler/localhost/g') +#+end_src + +Extract the data +#+begin_src tmate :window cluster +mkdir -p /tmp/mattermost-data +tar -xzvf /tmp/mattermost_data.gz -C /tmp/mattermost-data +#+end_src + +Port-forward the MinIO server +#+begin_src tmate :window mmm +export KUBECONFIG=~/.kube/config-ii-coop +kubectl -n mattermost port-forward
svc/mattermost-minio-hl-svc 9000 +#+end_src + +Configure the connection to the MinIO server +#+begin_src tmate :window cluster +mc alias set mattermost-minio-hl-svc http://localhost:9000 $(kubectl -n mattermost get secrets mattermost-minio -o=jsonpath='{.data.accesskey}' | base64 -d) $(kubectl -n mattermost get secrets mattermost-minio -o=jsonpath='{.data.secretkey}' | base64 -d) +#+end_src + +Copy the data over to MinIO +#+begin_src tmate :window cluster +mc cp --recursive /tmp/mattermost-data/data/ mattermost-minio/mattermost/data/ +#+end_src + +** References +- https://docs.gitlab.com/omnibus/gitlab-mattermost/#back-up-gitlab-mattermost +- https://docs.mattermost.com/install/install-kubernetes.html + +* Install Kanban +** Prepare +Create a namespace +#+begin_src tmate :window kanban +kubectl create ns kanban-ii-coop +#+end_src + +#+RESULTS: +#+begin_example +namespace/kanban-ii-coop created +#+end_example + +** Configure +Certs +#+begin_src yaml :tangle ./kanboard.yaml +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: kanban-ii-coop +spec: + acme: + email: kanban@ii.coop + preferredChain: "" + privateKeySecretRef: + name: letsencrypt-prod + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - http01: + ingress: + class: nginx + selector: + dnsNames: + - kanban.ii.coop +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: kanban-ii-coop + namespace: kanban-ii-coop +spec: + dnsNames: + - kanban.ii.coop + issuerRef: + group: cert-manager.io + kind: ClusterIssuer + name: kanban-ii-coop + secretName: letsencrypt-prod +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: kanboard + namespace: kanban-ii-coop +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: kanboard + name: kanboard + namespace: kanban-ii-coop +spec: + replicas: 1 + selector: + matchLabels: + app: kanboard + template: + 
metadata: + labels: + app: kanboard + spec: + containers: + - image: kanboard/kanboard:v1.2.20 + name: kanboard + ports: + - containerPort: 80 + volumeMounts: + - name: kanboard + mountPath: /var/www/app/data + volumes: + - name: kanboard + persistentVolumeClaim: + claimName: kanboard +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: kanboard + name: kanboard + namespace: kanban-ii-coop +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: kanboard +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kanboard + namespace: kanban-ii-coop + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: kanban.ii.coop + http: + paths: + - backend: + service: + name: kanboard + port: + number: 80 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - kanban.ii.coop + secretName: letsencrypt-prod +#+end_src + +** Install +Apply all the things +#+begin_src tmate :window kanban +kubectl apply -f kanboard.yaml +#+end_src + +Waiting for cert +#+begin_src shell +kubectl -n kanban-ii-coop get challenges +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +Checkout the ingress +#+begin_src shell +kubectl -n kanban-ii-coop get ing +#+end_src + +#+RESULTS: +#+begin_example +NAME CLASS HOSTS ADDRESS PORTS AGE +kanboard kanban.ii.coop 139.178.68.219 80, 443 10m +#+end_example + +* Install Wireguard +** Prepare +#+begin_src tmate :window cluster +kubectl create namespace wireguard --dry-run=client -o yaml | \ + kubectl apply -f - +#+end_src +** Configure +#+begin_src yaml :tangle ./wireguard.yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: wireguard + name: wireguard-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Mi +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: wireguard-config + namespace: wireguard +data: + PUID: "1000" + PGID: "1000" + TZ: "Pacific/Auckland" + SERVERPORT: "51820" + SERVERURL: "wg.ii.coop" + PEERS: 
"30" + # PEERDNS: "10.43.0.30" + PEERDNS: "auto" + ALLOWEDIPS: "0.0.0.0/0, ::/0" + INTERNAL_SUBNET: "10.13.13.0" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: wireguard + namespace: wireguard + labels: + app: wireguard +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + app: wireguard + template: + metadata: + labels: + app: wireguard + spec: + containers: + - name: wireguard + image: docker.io/linuxserver/wireguard:v1.0.20210424-ls36 + envFrom: + - configMapRef: + name: wireguard-config + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + volumeMounts: + - name: wg-config + mountPath: /config + - name: host-volumes + mountPath: /lib/modules + ports: + - containerPort: 51820 + protocol: UDP + resources: + requests: + memory: "64Mi" + cpu: "10m" + limits: + memory: "128Mi" + cpu: "100m" + volumes: + - name: wg-config + persistentVolumeClaim: + claimName: wireguard-pvc + - name: host-volumes + hostPath: + path: /lib/modules + type: Directory +--- +apiVersion: v1 +kind: Service +metadata: + name: wireguard + namespace: wireguard +spec: + selector: + app: wireguard + ports: + - name: wireguard + port: 51820 + targetPort: 51820 + protocol: UDP + externalTrafficPolicy: Local + type: LoadBalancer +#+end_src + +** Install +#+begin_src tmate :window cluster +kubectl apply -f ./wireguard.yaml +#+end_src + +* Finalise +** Move ClusterAPI management +#+begin_src tmate :window SHELL +clusterctl move -n ii-coop --to-kubeconfig=$HOME/.kube/config-ii-coop +#+end_src + + +* Extras +** Move from Packet-CCM to Metal-CCM +Move config over and use LoadBalancer configuration pointing to MetalLB +#+begin_src tmate :window cluster +kubectl -n kube-system create secret generic metal-cloud-config --from-file=cloud-sa.json=<(kubectl -n kube-system get secret packet-cloud-config -o=jsonpath='{.data.cloud-sa\.json}' | base64 -d | jq -cr '. 
| .loadbalancer = "metallb:///"') +#+end_src + +Remove the older Packet-CCM +#+begin_src tmate :window cluster +kubectl delete -f https://github.com/packethost/packet-ccm/releases/download/v2.0.0/deployment.yaml +#+end_src + +Install Metal-CCM +#+begin_src tmate :window cluster +kubectl apply -f https://github.com/equinix/cloud-provider-equinix-metal/releases/download/v3.2.2/deployment.yaml +#+end_src + +** cncf.ci redirect +#+NAME: Certs +#+begin_src yaml :tangle ./cncf-ci-redirect.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: cncf-ci-redirect +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod + namespace: cncf-ci-redirect +spec: + secretName: letsencrypt-prod + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - cncf.ci +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cncf-ci-redirect + namespace: cncf-ci-redirect + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^/(.*) https://github.com/cncf-ci permanent; +spec: + rules: + - host: cncf.ci + http: + paths: + - backend: + service: + name: cncf-ci-redirect + port: + number: 8080 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - cncf.ci + secretName: letsencrypt-prod +#+end_src + +#+begin_src tmate :dir . 
:window cluster +envsubst < cncf-ci-redirect.yaml | kubectl apply -f - +#+end_src + +** ii-nz-temp +*** multiplex +**** Configure +#+begin_src yaml :tangle ./ii-nz-temp/multiplex.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: ii-nz +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: multiplex-ii-nz-letsencrypt-prod + namespace: ii-nz +spec: + secretName: letsencrypt-prod + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - "multiplex.ii.nz" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: reveal-multiplex + name: reveal-multiplex + namespace: ii-nz +spec: + replicas: 1 + selector: + matchLabels: + app: reveal-multiplex + template: + metadata: + labels: + app: reveal-multiplex + spec: + containers: + - name: reveal-multiplex + image: registry.gitlab.com/ii/nz/reveal-multiplex + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: http + timeoutSeconds: 1 + ports: + - containerPort: 1948 + name: http + protocol: TCP + readinessProbe: + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: http + timeoutSeconds: 1 + securityContext: + readOnlyRootFilesystem: true + runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: reveal-multiplex + name: reveal-multiplex + namespace: ii-nz +spec: + ports: + - name: http + port: 1948 + protocol: TCP + targetPort: http + selector: + app: reveal-multiplex +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + labels: + app: reveal-multiplex + name: reveal-multiplex + namespace: ii-nz +spec: + rules: + - host: multiplex.ii.nz + http: + paths: + - backend: + service: + name: reveal-multiplex + port: + number: 1948 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - multiplex.ii.nz + secretName: letsencrypt-prod +#+end_src + +**** Install +#+begin_src tmate 
:dir . :window cluster +kubectl apply -f ./ii-nz-temp/multiplex.yaml +#+end_src + +*** expo-prezzo +**** Configure +#+begin_src yaml :tangle ./ii-nz-temp/expo-prezzo.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: careers-expo-ii-nz +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: careers-expo-ii-nz-letsencrypt-prod + namespace: careers-expo-ii-nz +spec: + secretName: careers-expo-ii-nz-letsencrypt-prod + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - "careers-expo.ii.nz" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: careers-expo-ii-nz + namespace: careers-expo-ii-nz + labels: + environment: production +spec: + replicas: 1 + selector: + matchLabels: + app: careers-expo-ii-nz + template: + metadata: + labels: + app: careers-expo-ii-nz + environment: production + spec: + automountServiceAccountToken: false + containers: + - name: careers-expo-ii-nz + image: registry.gitlab.com/ii/expo-prezzo:2021.08.12.1642 + imagePullPolicy: IfNotPresent + securityContext: + readOnlyRootFilesystem: true + runAsUser: 1000 + runAsGroup: 1000 + allowPrivilegeEscalation: false + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8101 + env: + - name: TZ + value: "Pacific/Auckland" + readinessProbe: + tcpSocket: + port: 8101 + initialDelaySeconds: 2 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 8101 + initialDelaySeconds: 2 + periodSeconds: 10 + failureThreshold: 10 + resources: + requests: + memory: "400Mi" + cpu: "1" + limits: + memory: "400Mi" + cpu: "1" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 +--- +apiVersion: v1 +kind: Service +metadata: + name: careers-expo-ii-nz + namespace: careers-expo-ii-nz + labels: + environment: production +spec: + ports: + - port: 8101 + targetPort: 8101 + selector: + app: careers-expo-ii-nz +--- +apiVersion: 
networking.k8s.io/v1 +kind: Ingress +metadata: + name: careers-expo-ii-nz + namespace: careers-expo-ii-nz + labels: + environment: production + annotations: + cert-manager.io/cluster-issuer: careers-expo-ii-nz-letsencrypt-prod + kubernetes.io/ingress.class: nginx +spec: + tls: + - hosts: + - "careers-expo.ii.nz" + secretName: careers-expo-ii-nz-letsencrypt-prod + rules: + - host: "careers-expo.ii.nz" + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: careers-expo-ii-nz + port: + number: 8101 +#+end_src + +**** Install +#+begin_src tmate :dir . :window cluster +kubectl apply -f ./ii-nz-temp/expo-prezzo.yaml +#+end_src diff --git a/ii/cluster/certs-mattermost.yaml b/ii/cluster/certs-mattermost.yaml new file mode 100644 index 0000000..59e0564 --- /dev/null +++ b/ii/cluster/certs-mattermost.yaml @@ -0,0 +1,16 @@ +# Certs + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod-mattermost + namespace: mattermost +spec: + secretName: mattermost-ii-coop-tls-cert + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + group: cert-manager.io + commonName: mattermost.ii.coop + dnsNames: + - mattermost.ii.coop diff --git a/ii/cluster/certs.yaml b/ii/cluster/certs.yaml new file mode 100644 index 0000000..d1d7198 --- /dev/null +++ b/ii/cluster/certs.yaml @@ -0,0 +1,32 @@ + + +# #+NAME: Certs + +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: letsencrypt@ii.coop + privateKeySecretRef: + name: letsencrypt-prod + solvers: + - http01: + ingress: + class: nginx +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod +spec: + secretName: letsencrypt-prod + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - "gitlab.ii.coop" + - "minio.gitlab.ii.coop" + - "registry.gitlab.ii.coop" diff --git a/ii/cluster/cncf-ci-redirect.yaml 
b/ii/cluster/cncf-ci-redirect.yaml new file mode 100644 index 0000000..2d83306 --- /dev/null +++ b/ii/cluster/cncf-ci-redirect.yaml @@ -0,0 +1,46 @@ +# cncf.ci redirect +# #+NAME: Certs + +apiVersion: v1 +kind: Namespace +metadata: + name: cncf-ci-redirect +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod + namespace: cncf-ci-redirect +spec: + secretName: letsencrypt-prod + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - cncf.ci +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cncf-ci-redirect + namespace: cncf-ci-redirect + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^/(.*) https://github.com/cncf-ci permanent; +spec: + rules: + - host: cncf.ci + http: + paths: + - backend: + service: + name: cncf-ci-redirect + port: + number: 8080 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - cncf.ci + secretName: letsencrypt-prod diff --git a/ii/cluster/dnsendpoint.yaml b/ii/cluster/dnsendpoint.yaml new file mode 100644 index 0000000..0e4eaba --- /dev/null +++ b/ii/cluster/dnsendpoint.yaml @@ -0,0 +1,25 @@ + + +# #+NAME: DNSEndpoint + +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: gitlab-ii-coop +spec: + endpoints: + - dnsName: 'gitlab-staging.ii.coop' + recordTTL: 3600 + recordType: A + targets: + - ${LOAD_BALANCER_IP} + - dnsName: '*.gitlab-staging.ii.coop' + recordTTL: 3600 + recordType: A + targets: + - ${LOAD_BALANCER_IP} + - dnsName: gitlab-staging.ii.coop + recordTTL: 3600 + recordType: SOA + targets: + - 'ns1.gitlab-staging.ii.coop. hostmaster.gitlab-staging.ii.coop. 
5 3600 3600 3600 3600' diff --git a/ii/cluster/external-dns.yaml b/ii/cluster/external-dns.yaml new file mode 100644 index 0000000..7be4149 --- /dev/null +++ b/ii/cluster/external-dns.yaml @@ -0,0 +1,110 @@ + + +# #+NAME: External-DNS manifests + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: external-dns +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: external-dns +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - watch + - list +- apiGroups: + - externaldns.k8s.io + resources: + - dnsendpoints + verbs: + - get + - watch + - list +- apiGroups: + - externaldns.k8s.io + resources: + - dnsendpoints/status + verbs: + - get + - update + - patch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: external-dns-viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-dns +subjects: +- kind: ServiceAccount + name: external-dns + namespace: external-dns +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: external-dns +spec: + strategy: + type: Recreate + selector: + matchLabels: + app: external-dns + template: + metadata: + labels: + app: external-dns + spec: + serviceAccountName: external-dns + containers: + - name: external-dns + image: k8s.gcr.io/external-dns/external-dns:v0.7.4 + args: + - --source=crd + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --crd-source-kind=DNSEndpoint + - --provider=pdns + - --policy=sync + - --registry=txt + - --interval=10s + - --log-level=debug + env: + - name: EXTERNAL_DNS_TXT_OWNER_ID + valueFrom: + secretKeyRef: + name: external-dns-pdns + key: txt-owner-id + - name: EXTERNAL_DNS_PDNS_SERVER + valueFrom: + secretKeyRef: + name: external-dns-pdns + key: pdns-server + - name: EXTERNAL_DNS_PDNS_API_KEY + valueFrom: + 
secretKeyRef: + name: external-dns-pdns + key: pdns-api-key + - name: EXTERNAL_DNS_PDNS_TLS_ENABLED + value: "0" diff --git a/ii/cluster/gitlab-cluster-capi-template.yaml b/ii/cluster/gitlab-cluster-capi-template.yaml new file mode 100644 index 0000000..d29ed46 --- /dev/null +++ b/ii/cluster/gitlab-cluster-capi-template.yaml @@ -0,0 +1,204 @@ +# Set up cluster +# #+NAME: Cluster-API manifests + +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + postKubeadmCommands: + - | + cat <<EOF >> /etc/network/interfaces + auto lo:0 + iface lo:0 inet static + address {{ .controlPlaneEndpoint }} + netmask 255.255.255.255 + EOF + - systemctl restart networking + - mkdir -p ~/.kube/ + - cp /etc/kubernetes/admin.conf ~/.kube/config + - 'kubectl create secret generic -n kube-system metal-cloud-config --from-literal=cloud-sa.json=''{"apiKey": "{{ .apiKey }}","projectID": "${PROJECT_ID}","loadbalancer":"metallb:///"}''' + - kubectl apply -f https://github.com/equinix/cloud-provider-equinix-metal/releases/download/v3.2.2/deployment.yaml + - kubectl taint node --all node-role.kubernetes.io/master- + preKubeadmCommands: + - sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab + - swapoff -a + - mount -a + - | + cat <<EOF > /etc/modules-load.d/containerd.conf + overlay + br_netfilter + EOF + - modprobe overlay + - modprobe br_netfilter + - | + cat <<EOF > 
/etc/sysctl.d/99-kubernetes-cri.conf + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + net.bridge.bridge-nf-call-ip6tables = 1 + EOF + - sysctl --system + - apt-get -y update + - DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list + - apt-get update -y + - TRIMMED_KUBERNETES_VERSION=$(echo {{ .kubernetesVersion }} | sed 's/\./\\./g' | sed 's/^v//') + - RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=$${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1) + - apt-get install -y ca-certificates socat jq ebtables apt-transport-https cloud-utils prips containerd kubelet=$${RESOLVED_KUBERNETES_VERSION} kubeadm=$${RESOLVED_KUBERNETES_VERSION} kubectl=$${RESOLVED_KUBERNETES_VERSION} + - systemctl daemon-reload + - systemctl enable containerd + - systemctl start containerd + - ping -c 3 -q {{ .controlPlaneEndpoint }} && echo OK || ip addr add {{ .controlPlaneEndpoint }} dev lo +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + OS: "${NODE_OS:=ubuntu_18_04}" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + projectID: "${PROJECT_ID}" + facility: "${FACILITY}" +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a +spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a +spec: + template: + spec: + OS: "${NODE_OS:=ubuntu_18_04}" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +kind: KubeadmConfigTemplate +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-worker-a" +spec: + template: + spec: + preKubeadmCommands: + - sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab + - swapoff -a + - mount -a + - | + cat <<EOF > /etc/modules-load.d/containerd.conf + overlay + br_netfilter + EOF + - modprobe overlay + - modprobe br_netfilter + - | + cat <<EOF > /etc/sysctl.d/99-kubernetes-cri.conf + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + net.bridge.bridge-nf-call-ip6tables = 1 + EOF + - sysctl --system + - apt-get -y update + - DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key 
add - + - echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list + - apt-get update -y + - TRIMMED_KUBERNETES_VERSION=$(echo {{ .kubernetesVersion }} | sed 's/\./\\./g' | sed 's/^v//') + - RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=$${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1) + - apt-get install -y ca-certificates socat jq ebtables apt-transport-https cloud-utils prips containerd kubelet=$${RESOLVED_KUBERNETES_VERSION} kubeadm=$${RESOLVED_KUBERNETES_VERSION} kubectl=$${RESOLVED_KUBERNETES_VERSION} + - systemctl daemon-reload + - systemctl enable containerd + - systemctl start containerd + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external diff --git a/ii/cluster/gitlab-postgres.yaml b/ii/cluster/gitlab-postgres.yaml new file mode 100644 index 0000000..594e043 --- /dev/null +++ b/ii/cluster/gitlab-postgres.yaml @@ -0,0 +1,32 @@ + + +# #+NAME: Postgres database + +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: gitlab-db + namespace: gitlab +spec: + enableConnectionPooler: true + connectionPooler: + mode: session + resources: + requests: + cpu: 250m + memory: 100Mi + limits: + cpu: "1" + memory: 100Mi + teamId: "gitlab" + volume: + size: 50Gi + numberOfInstances: 3 + users: + gitlab: # database owner + - superuser + - createdb + databases: + gitlab: gitlab # dbname: owner + postgresql: + version: "13" diff --git a/ii/cluster/gitlab.yaml b/ii/cluster/gitlab.yaml new file mode 100644 index 0000000..d762f01 --- /dev/null +++ b/ii/cluster/gitlab.yaml @@ -0,0 +1,145 @@ + + +# #+NAME: GitLab + +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: gitlab +spec: + releaseName: gitlab + chart: + repository: https://charts.gitlab.io/ + name: gitlab + version: 5.1.6 + values: + postgresql: + install: false + global: + psql: + host: gitlab-db-pooler.gitlab + password: + secret: gitlab.gitlab-db.credentials + 
key: password + port: 5432 + database: gitlab + username: gitlab + email: + from: gitlab@ii.coop + reply_to: gitlab@ii.coop + display_name: gitlab.ii.coop + appConfig: + incomingEmail: + enabled: true + address: '%{key}@gitlab.ii.coop' + user: mailbot@ii.coop + host: imap.gmail.com + port: 993 + ssl: true + startTls: false + idleTimeout: 60 + password: + secret: gitlab-ii-coop-imap-password + key: password + omniauth: + enabled: true + blockAutoCreatedUsers: true + allowSingleSignOn: + - twitter + - github + - google_oauth2 + - gitlab + - facebook + providers: + - secret: gitlab-ii-coop-omniauth-github + - secret: gitlab-ii-coop-omniauth-gitlab + - secret: gitlab-ii-coop-omniauth-google-oauth2 + - secret: gitlab-ii-coop-omniauth-twitter + smtp: + enabled: true + address: smtp.gmail.com + authentication: login + openssl_verify_mode: peer + tls: false + starttls_auto: true + domain: gitlab.ii.coop + port: 587 + user_name: mailbot@ii.coop + password: + secret: gitlab-ii-coop-smtp-password + key: password + hosts: + domain: ii.coop + gitlab: + name: gitlab.ii.coop + minio: + name: minio.gitlab.ii.coop + registry: + name: registry.gitlab.ii.coop + ingress: + configureCertmanager: false + pages: + enabled: true + global: + hosts: + domain: gitlab.ii.coop + host: gitlab.ii.coop + port: 443 + https: true + apiSecret: + secret: gitlab-pages-api-secret + key: shared_secret + shell: + port: 22 + certmanager: + install: false + gitlab: + ingress: + enabled: true + ingressclass: nginx + gitlab-shell: + enabled: true + service: + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + type: LoadBalancer + webservice: + ingress: + annotations: + kubernetes.io/ingress.class: nginx + tls: + secretName: letsencrypt-prod + task-runner: + enabled: true + replicas: 1 + persistence: + enabled: true + size: '120Gi' + gitlab-pages: + enabled: true + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + tls: + secretName: letsencrypt-prod + registry: + 
ingress: + annotations: + kubernetes.io/ingress.class: nginx + tls: + secretName: letsencrypt-prod + minio: + ingress: + annotations: + kubernetes.io/ingress.class: nginx + tls: + secretName: letsencrypt-prod + nginx-ingress: + enabled: false + task-runner: + enabled: true + replicas: 1 + persistence: + enabled: true + size: '120Gi' diff --git a/ii/cluster/humacs-pvc.yaml b/ii/cluster/humacs-pvc.yaml new file mode 100644 index 0000000..676ce52 --- /dev/null +++ b/ii/cluster/humacs-pvc.yaml @@ -0,0 +1,16 @@ + + +# #+NAME: Humacs-PVC + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: humacs-home-ii + namespace: humacs +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-block diff --git a/ii/cluster/humacs.yaml b/ii/cluster/humacs.yaml new file mode 100644 index 0000000..b91d36a --- /dev/null +++ b/ii/cluster/humacs.yaml @@ -0,0 +1,38 @@ + + +# #+NAME: Humacs + +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: humacs + namespace: humacs +spec: + releaseName: humacs + chart: + git: https://github.com/humacs/humacs + ref: main + path: chart/humacs + values: + options: + hostDockerSocket: false + hostTmp: true + timezone: Pacific/Auckland + gitName: gitlab + gitEmail: humacs@ii.coop + profile: ii + image: + repository: registry.gitlab.com/humacs/humacs/ii + tag: 2020.12.03 + extraEnvVars: + - name: HUMACS_DEBUG + value: "true" + - name: REINIT_HOME_FOLDER + value: "true" + extraVolumes: + - name: home-ii + persistentVolumeClaim: + claimName: humacs-home-ii + extraVolumeMounts: + - name: home-ii + mountPath: "/home/ii" diff --git a/ii/cluster/ii-nz-temp/expo-prezzo.yaml b/ii/cluster/ii-nz-temp/expo-prezzo.yaml new file mode 100644 index 0000000..9c154d8 --- /dev/null +++ b/ii/cluster/ii-nz-temp/expo-prezzo.yaml @@ -0,0 +1,122 @@ +# Configure + +apiVersion: v1 +kind: Namespace +metadata: + name: careers-expo-ii-nz +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: 
+ name: careers-expo-ii-nz-letsencrypt-prod + namespace: careers-expo-ii-nz +spec: + secretName: careers-expo-ii-nz-letsencrypt-prod + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - "careers-expo.ii.nz" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: careers-expo-ii-nz + namespace: careers-expo-ii-nz + labels: + environment: production +spec: + replicas: 1 + selector: + matchLabels: + app: careers-expo-ii-nz + template: + metadata: + labels: + app: careers-expo-ii-nz + environment: production + spec: + automountServiceAccountToken: false + containers: + - name: careers-expo-ii-nz + image: registry.gitlab.com/ii/expo-prezzo:2021.08.12.1642 + imagePullPolicy: IfNotPresent + securityContext: + readOnlyRootFilesystem: true + runAsUser: 1000 + runAsGroup: 1000 + allowPrivilegeEscalation: false + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8101 + env: + - name: TZ + value: "Pacific/Auckland" + readinessProbe: + tcpSocket: + port: 8101 + initialDelaySeconds: 2 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 8101 + initialDelaySeconds: 2 + periodSeconds: 10 + failureThreshold: 10 + resources: + requests: + memory: "400Mi" + cpu: "1" + limits: + memory: "400Mi" + cpu: "1" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 +--- +apiVersion: v1 +kind: Service +metadata: + name: careers-expo-ii-nz + namespace: careers-expo-ii-nz + labels: + environment: production +spec: + ports: + - port: 8101 + targetPort: 8101 + selector: + app: careers-expo-ii-nz +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: careers-expo-ii-nz + namespace: careers-expo-ii-nz + labels: + environment: production + annotations: + cert-manager.io/cluster-issuer: careers-expo-ii-nz-letsencrypt-prod + kubernetes.io/ingress.class: nginx +spec: + tls: + - hosts: + - "careers-expo.ii.nz" + 
secretName: careers-expo-ii-nz-letsencrypt-prod + rules: + - host: "careers-expo.ii.nz" + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: careers-expo-ii-nz + port: + number: 8101 diff --git a/ii/cluster/ii-nz-temp/multiplex.yaml b/ii/cluster/ii-nz-temp/multiplex.yaml new file mode 100644 index 0000000..c98688f --- /dev/null +++ b/ii/cluster/ii-nz-temp/multiplex.yaml @@ -0,0 +1,104 @@ +# Configure + +apiVersion: v1 +kind: Namespace +metadata: + name: ii-nz +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: multiplex-ii-nz-letsencrypt-prod + namespace: ii-nz +spec: + secretName: letsencrypt-prod + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - "multiplex.ii.nz" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: reveal-multiplex + name: reveal-multiplex + namespace: ii-nz +spec: + replicas: 1 + selector: + matchLabels: + app: reveal-multiplex + template: + metadata: + labels: + app: reveal-multiplex + spec: + containers: + - name: reveal-multiplex + image: registry.gitlab.com/ii/nz/reveal-multiplex + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: http + timeoutSeconds: 1 + ports: + - containerPort: 1948 + name: http + protocol: TCP + readinessProbe: + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: http + timeoutSeconds: 1 + securityContext: + readOnlyRootFilesystem: true + runAsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: reveal-multiplex + name: reveal-multiplex + namespace: ii-nz +spec: + ports: + - name: http + port: 1948 + protocol: TCP + targetPort: http + selector: + app: reveal-multiplex +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + labels: + app: reveal-multiplex + name: reveal-multiplex + namespace: ii-nz +spec: + rules: + - host: 
multiplex.ii.nz + http: + paths: + - backend: + service: + name: reveal-multiplex + port: + number: 1948 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - multiplex.ii.nz + secretName: letsencrypt-prod diff --git a/ii/cluster/kanboard.yaml b/ii/cluster/kanboard.yaml new file mode 100644 index 0000000..c6ba28c --- /dev/null +++ b/ii/cluster/kanboard.yaml @@ -0,0 +1,116 @@ +# Configure +# Certs + +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: kanban-ii-coop +spec: + acme: + email: kanban@ii.coop + preferredChain: "" + privateKeySecretRef: + name: letsencrypt-prod + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - http01: + ingress: + class: nginx + selector: + dnsNames: + - kanban.ii.coop +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: kanban-ii-coop + namespace: kanban-ii-coop +spec: + dnsNames: + - kanban.ii.coop + issuerRef: + group: cert-manager.io + kind: ClusterIssuer + name: kanban-ii-coop + secretName: letsencrypt-prod +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: kanboard + namespace: kanban-ii-coop +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: kanboard + name: kanboard + namespace: kanban-ii-coop +spec: + replicas: 1 + selector: + matchLabels: + app: kanboard + template: + metadata: + labels: + app: kanboard + spec: + containers: + - image: kanboard/kanboard:v1.2.20 + name: kanboard + ports: + - containerPort: 80 + volumeMounts: + - name: kanboard + mountPath: /var/www/app/data + volumes: + - name: kanboard + persistentVolumeClaim: + claimName: kanboard +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: kanboard + name: kanboard + namespace: kanban-ii-coop +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: kanboard +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kanboard + 
namespace: kanban-ii-coop + annotations: + kubernetes.io/ingress.class: nginx +spec: + rules: + - host: kanban.ii.coop + http: + paths: + - backend: + service: + name: kanboard + port: + number: 80 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - kanban.ii.coop + secretName: letsencrypt-prod diff --git a/ii/cluster/kubed.yaml b/ii/cluster/kubed.yaml new file mode 100644 index 0000000..fbf6464 --- /dev/null +++ b/ii/cluster/kubed.yaml @@ -0,0 +1,17 @@ + + +# #+NAME: kubed + +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: kubed + namespace: kube-system +spec: + releaseName: kubed + chart: + repository: https://charts.appscode.com/stable/ + name: kubed + version: v0.12.0 + values: + enableAnalytics: false diff --git a/ii/cluster/mattermost-backup.yaml b/ii/cluster/mattermost-backup.yaml new file mode 100644 index 0000000..c06f997 --- /dev/null +++ b/ii/cluster/mattermost-backup.yaml @@ -0,0 +1,175 @@ + + +# Backup CronJob for Mattermost, storing [Postgres, MinIO, Config] all to a s3 bucket each week at 8:45 on a Tuesday + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mattermost-backup + namespace: mattermost +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: mattermost-backup + namespace: mattermost +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +- apiGroups: + - apps + resources: + - deployments + verbs: + - get + - list +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: mattermost-backup + namespace: mattermost +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: mattermost-backup +subjects: +- kind: ServiceAccount + name: mattermost-backup + namespace: mattermost +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: mattermost-backup + namespace: mattermost +spec: + jobTemplate: + metadata: + name: mattermost-backup + spec: + template: + 
metadata: + labels: + app: mattermost-backup + spec: + serviceAccountName: mattermost-backup + volumes: + - name: tmp + emptyDir: {} + initContainers: + - name: get-date + image: alpine:3.15 + command: + - sh + - -x + - -c + - date +%Y%m%d%H%M | tee /tmp/date.txt + volumeMounts: + - name: tmp + mountPath: /tmp + - name: dump-config + image: alpine:3.15 + command: + - sh + - -x + - -c + - | + DATE="$(cat /tmp/date.txt)" && \ + apk add --no-cache curl && \ + curl -L -o /usr/local/bin/kubectl https://dl.k8s.io/v1.23.3/bin/linux/amd64/kubectl && \ + chmod +x /usr/local/bin/kubectl && \ + kubectl -n mattermost exec -it -c mattermost deployment/mattermost -- cat /mattermost/config/config.json > /tmp/mattermost-config-${DATE}.json && \ + cd /tmp && \ + tar cvf /tmp/mattermost-config-${DATE}.json.tar.gz /tmp/mattermost-config-${DATE}.json + volumeMounts: + - name: tmp + mountPath: /tmp + - name: dump-database + image: postgres:13.6-alpine + envFrom: + - secretRef: + name: mattermost-database + command: + - sh + - -x + - -c + - | + DATE="$(cat /tmp/date.txt)" && \ + pg_dump "${DB_CONNECTION_STRING}" -f /tmp/mattermost-db-${DATE}.sql && \ + cd /tmp && \ + tar cvf ./mattermost-db-${DATE}.sql.tar.gz /tmp/mattermost-db-${DATE}.sql + volumeMounts: + - name: tmp + mountPath: /tmp + - name: dump-minio + image: minio/mc:RELEASE.2022-02-13T23-26-13Z + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: mattermost-minio + key: accesskey + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: mattermost-minio + key: secretkey + - name: MINIO_HOST + value: mattermost-minio-hl-svc.mattermost:9000 + - name: MINIO_BUCKET + value: mattermost + command: + - sh + - -x + - -c + - | + DATE="$(cat /tmp/date.txt)" && \ + microdnf install tar && \ + mc config host add mattermostminio http://mattermost-minio-hl-svc.mattermost:9000 ${MINIO_ACCESS_KEY} ${MINIO_SECRET_KEY} && \ + mc cp --recursive mattermostminio/mattermost /tmp/mattermost-minio-${DATE}/ && \ + cd 
/tmp/mattermost-minio-${DATE}/ && \ + tar cvf ../mattermost-minio-${DATE}.tar.gz . + volumeMounts: + - name: tmp + mountPath: /tmp + containers: + - name: mattermost-backup + image: amazon/aws-cli:2.4.18 + envFrom: + - secretRef: + name: aws-serviceaccount-secret + env: + - name: S3_BUCKET + value: ii-nz + command: + - sh + - -x + - -c + - | + DATE="$(cat /tmp/date.txt)" && \ + aws configure set aws_access_key_id "${AWS_ACCESS_KEY}" && \ + aws configure set aws_secret_access_key "${AWS_SECRET_KEY}" && \ + aws configure set default.region ap-southeast-2 && \ + cd /tmp && \ + for FILE in mattermost*.tar.gz; do + aws s3 cp "${FILE}" "s3://${S3_BUCKET}/mattermost-backup/${DATE}/${FILE}" + done + # - sleep 100000 + volumeMounts: + - name: tmp + mountPath: /tmp + restartPolicy: OnFailure + schedule: 45 8 * * 2 + # schedule: "*/30 * * * *" diff --git a/ii/cluster/mattermost-operator.yaml b/ii/cluster/mattermost-operator.yaml new file mode 100644 index 0000000..c5324b0 --- /dev/null +++ b/ii/cluster/mattermost-operator.yaml @@ -0,0 +1,4469 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: clusterinstallations.mattermost.com +spec: + group: mattermost.com + names: + kind: ClusterInstallation + listKind: ClusterInstallationList + plural: clusterinstallations + singular: clusterinstallation + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: State of Mattermost + jsonPath: .status.state + name: State + type: string + - description: Image of Mattermost + jsonPath: .status.image + name: Image + type: string + - description: Version of Mattermost + jsonPath: .status.version + name: Version + type: string + - description: Endpoint + jsonPath: .status.endpoint + name: Endpoint + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterInstallation is the Schema for the clusterinstallations + API + 
properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Mattermost + cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status' + properties: + affinity: + description: If specified, affinity will define the pod's scheduling + constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). 
+ A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. 
If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. 
If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". 
+ The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. 
If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. 
When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + blueGreen: + description: BlueGreen defines the configuration of BlueGreen deployment + for a ClusterInstallation + properties: + blue: + description: Blue defines the blue deployment. + properties: + image: + description: Image defines the base Docker image that will + be used for the deployment. Required when BlueGreen or Canary + is enabled. + type: string + ingressName: + description: IngressName defines the ingress name that will + be used by the deployment. This option is not used for Canary + builds. + type: string + name: + description: Name defines the name of the deployment + type: string + resourceLabels: + additionalProperties: + type: string + type: object + version: + description: Version defines the Docker image version that + will be used for the deployment. Required when BlueGreen + or Canary is enabled. + type: string + type: object + enable: + description: Enable defines if BlueGreen deployment will be applied. + type: boolean + green: + description: Green defines the green deployment. + properties: + image: + description: Image defines the base Docker image that will + be used for the deployment. Required when BlueGreen or Canary + is enabled. 
+ type: string + ingressName: + description: IngressName defines the ingress name that will + be used by the deployment. This option is not used for Canary + builds. + type: string + name: + description: Name defines the name of the deployment + type: string + resourceLabels: + additionalProperties: + type: string + type: object + version: + description: Version defines the Docker image version that + will be used for the deployment. Required when BlueGreen + or Canary is enabled. + type: string + type: object + productionDeployment: + description: ProductionDeployment defines if the current production + is blue or green. + type: string + type: object + canary: + description: Canary defines the configuration of Canary deployment + for a ClusterInstallation + properties: + deployment: + description: Deployment defines the canary deployment. + properties: + image: + description: Image defines the base Docker image that will + be used for the deployment. Required when BlueGreen or Canary + is enabled. + type: string + ingressName: + description: IngressName defines the ingress name that will + be used by the deployment. This option is not used for Canary + builds. + type: string + name: + description: Name defines the name of the deployment + type: string + resourceLabels: + additionalProperties: + type: string + type: object + version: + description: Version defines the Docker image version that + will be used for the deployment. Required when BlueGreen + or Canary is enabled. + type: string + type: object + enable: + description: Enable defines if a canary build will be deployed. + type: boolean + type: object + database: + description: Database defines the database configuration for a ClusterInstallation. + properties: + backupRemoteDeletePolicy: + description: Defines the backup retention policy. + type: string + backupRestoreSecretName: + description: Defines the secret to be used when performing a database + restore. 
+ type: string + backupSchedule: + description: Defines the interval for backups in cron expression + format. + type: string + backupSecretName: + description: Defines the secret to be used for uploading/restoring + backup. + type: string + backupURL: + description: Defines the object storage url for uploading backups. + type: string + initBucketURL: + description: Defines the AWS S3 bucket where the Database Backup + is stored. The operator will download the file to restore the + data. + type: string + replicas: + description: Defines the number of database replicas. For redundancy + use at least 2 replicas. Setting this will override the number + of replicas set by 'Size'. + format: int32 + type: integer + resources: + description: Defines the resource requests and limits for the + database pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + secret: + description: "Optionally enter the name of an already-existing + Secret for connecting to the database. 
This secret should be + configured as follows: \n User-Managed Database - Key: DB_CONNECTION_STRING + | Value: Operator-Managed + Database - Key: ROOT_PASSWORD | Value: + \ - Key: USER | Value: - Key: PASSWORD | Value: + - Key: DATABASE Value: \n + Notes: If you define all secret values for both User-Managed + and Operator-Managed database types, the User-Managed connection + string will take precedence and the Operator-Managed values + will be ignored. If the secret is left blank, the default + behavior is to use an Operator-Managed database with strong + randomly-generated database credentials." + type: string + storageSize: + description: Defines the storage size for the database. ie 50Gi + pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + type: string + type: + description: Defines the type of database to use for an Operator-Managed + database. This value is ignored when using a User-Managed database. + type: string + type: object + elasticSearch: + description: ElasticSearch defines the ElasticSearch configuration + for a ClusterInstallation. + properties: + host: + type: string + password: + type: string + username: + type: string + type: object + image: + description: Image defines the ClusterInstallation Docker image. + type: string + imagePullPolicy: + description: Specify deployment pull policy. + type: string + ingressAnnotations: + additionalProperties: + type: string + type: object + ingressName: + description: IngressName defines the name to be used when creating + the ingress rules + type: string + livenessProbe: + description: Defines the probe to check if the application is up and + running. + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. 
The command is + simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod + IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes a custom header to be + used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults + to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. + TCP hooks not yet supported TODO: implement a realistic TCP + lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + mattermostEnv: + description: Optional environment variables to set in the Mattermost + application pods. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + mattermostLicenseSecret: + description: Secret that contains the mattermost license + type: string + migrate: + description: 'Migrate specifies that the ClusterInstallation CR should + be migrated to the Mattermost CR. CAUTION: Some features like BlueGreen + or Canary are not supported with a new Custom Resource therefore + migration should be performed with extra caution.' + type: boolean + minio: + description: Minio defines the configuration of Minio for a ClusterInstallation. + properties: + externalBucket: + description: Set to the bucket name of your external MinIO or + S3. + type: string + externalURL: + description: Set to use an external MinIO deployment or S3. Must + also set 'Secret' and 'ExternalBucket'. + type: string + replicas: + description: 'Defines the number of Minio replicas. Supply 1 to + run Minio in standalone mode with no redundancy. Supply 4 or + more to run Minio in distributed mode. Note that it is not possible + to upgrade Minio from standalone to distributed mode. Setting + this will override the number of replicas set by ''Size''. More + info: https://docs.min.io/docs/distributed-minio-quickstart-guide.html' + format: int32 + type: integer + resources: + description: Defines the resource requests and limits for the + Minio pods. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + secret: + description: 'Optionally enter the name of already existing secret. + Secret should have two values: "accesskey" and "secretkey". + Required when "ExternalURL" is set.' + type: string + storageSize: + description: Defines the storage size for Minio. ie 50Gi + pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + type: string + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the + pod to fit on a node. Selector which must match a node''s labels + for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + readinessProbe: + description: Defines the probe to check if the application is ready + to accept traffic. + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command is + simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod + IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes a custom header to be + used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults + to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. + TCP hooks not yet supported TODO: implement a realistic TCP + lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + replicas: + description: Replicas defines the number of replicas to use for the + Mattermost app servers. Setting this will override the number of + replicas set by 'Size'. + format: int32 + type: integer + resourceLabels: + additionalProperties: + type: string + type: object + resources: + description: Defines the resource requests and limits for the Mattermost + app server pods. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceAnnotations: + additionalProperties: + type: string + type: object + size: + description: 'Size defines the size of the ClusterInstallation. This + is typically specified in number of users. This will override replica + and resource requests/limits appropriately for the provided number + of users. This is a write-only field - its value is erased after + setting appropriate values of resources. Accepted values are: 100users, + 1000users, 5000users, 10000users, 250000users. If replicas and resource + requests/limits are not specified, and Size is not provided the + configuration for 5000users will be applied. Setting ''Replicas'', + ''Resources'', ''Minio.Replicas'', ''Minio.Resource'', ''Database.Replicas'', + or ''Database.Resources'' will override the values set by Size. + Setting new Size will override previous values regardless if set + by Size or manually.' 
+ type: string + useIngressTLS: + type: boolean + useServiceLoadBalancer: + type: boolean + version: + description: Version defines the ClusterInstallation Docker image + version. + type: string + required: + - ingressName + type: object + status: + description: 'Most recent observed status of the Mattermost cluster. Read-only. + Not included when requesting from the apiserver, only from the Mattermost + Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status' + properties: + blueName: + description: The name of the blue deployment in BlueGreen + type: string + endpoint: + description: The endpoint to access the Mattermost instance + type: string + greenName: + description: The name of the green deployment in BlueGreen + type: string + image: + description: The image running on the pods in the Mattermost instance + type: string + migration: + description: The status of migration to Mattermost CR. + properties: + error: + type: string + status: + type: string + type: object + replicas: + description: Total number of non-terminated pods targeted by this + Mattermost deployment + format: int32 + type: integer + state: + description: Represents the running state of the Mattermost instance + type: string + updatedReplicas: + description: Total number of non-terminated pods targeted by this + Mattermost deployment that are running with the desired image. 
+ format: int32 + type: integer + version: + description: The version currently running in the Mattermost instance + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: mattermostrestoredbs.mattermost.com +spec: + group: mattermost.com + names: + kind: MattermostRestoreDB + listKind: MattermostRestoreDBList + plural: mattermostrestoredbs + singular: mattermostrestoredb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: State of Mattermost DB Restore + jsonPath: .status.state + name: State + type: string + - description: Original DB Replicas + jsonPath: .status.originalDBReplicas + name: Original DB Replicas + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: MattermostRestoreDB is the Schema for the mattermostrestoredbs + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MattermostRestoreDBSpec defines the desired state of MattermostRestoreDB + properties: + initBucketURL: + description: InitBucketURL defines where the DB backup file is located. + type: string + mattermostClusterName: + description: MattermostClusterName defines the ClusterInstallation + name. + type: string + mattermostDBName: + description: MattermostDBName defines the database name. Need to set + if different from `mattermost`. + type: string + mattermostDBPassword: + description: MattermostDBPassword defines the user password to access + the database. Need to set if the user is different from the one + created by the operator. + type: string + mattermostDBUser: + description: MattermostDBUser defines the user to access the database. + Need to set if the user is different from `mmuser`. + type: string + restoreSecret: + description: RestoreSecret defines the secret that holds the credentials + to MySQL Operator be able to download the DB backup file + type: string + type: object + status: + description: MattermostRestoreDBStatus defines the observed state of MattermostRestoreDB + properties: + originalDBReplicas: + description: The original number of database replicas. will be used + to restore after applying the db restore process. + format: int32 + type: integer + state: + description: Represents the state of the Mattermost restore Database. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: mattermosts.installation.mattermost.com +spec: + group: installation.mattermost.com + names: + kind: Mattermost + listKind: MattermostList + plural: mattermosts + shortNames: + - mm + singular: mattermost + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: State of Mattermost + jsonPath: .status.state + name: State + type: string + - description: Image of Mattermost + jsonPath: .status.image + name: Image + type: string + - description: Version of Mattermost + jsonPath: .status.version + name: Version + type: string + - description: Endpoint + jsonPath: .status.endpoint + name: Endpoint + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: Mattermost is the Schema for the mattermosts API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MattermostSpec defines the desired state of Mattermost + properties: + database: + description: External Services + properties: + external: + description: Defines the configuration of and external database. + properties: + secret: + description: 'Secret contains data necessary to connect to + the external database. The Kubernetes Secret should contain: - + Key: DB_CONNECTION_STRING | Value: Full database connection + string. It can also contain optional fields, such as: - + Key: MM_SQLSETTINGS_DATASOURCEREPLICAS | Value: Connection + string to read replicas of the database. - Key: DB_CONNECTION_CHECK_URL + | Value: The URL used for checking that the database is + accessible.' + type: string + type: object + operatorManaged: + description: Defines the configuration of database managed by + Kubernetes operator. + properties: + backupRemoteDeletePolicy: + description: Defines the backup retention policy. + type: string + backupRestoreSecretName: + description: Defines the secret to be used when performing + a database restore. + type: string + backupSchedule: + description: Defines the interval for backups in cron expression + format. + type: string + backupSecretName: + description: Defines the secret to be used for uploading/restoring + backup. + type: string + backupURL: + description: Defines the object storage url for uploading + backups. + type: string + initBucketURL: + description: Defines the AWS S3 bucket where the Database + Backup is stored. The operator will download the file to + restore the data. + type: string + replicas: + description: Defines the number of database replicas. For + redundancy use at least 2 replicas. Setting this will override + the number of replicas set by 'Size'. 
+ format: int32 + type: integer + resources: + description: Defines the resource requests and limits for + the database pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storageSize: + description: Defines the storage size for the database. ie + 50Gi + pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + type: string + type: + description: Defines the type of database to use for an Operator-Managed + database. + type: string + type: object + type: object + elasticSearch: + description: ElasticSearch defines the ElasticSearch configuration + for Mattermost. + properties: + host: + type: string + password: + type: string + username: + type: string + type: object + fileStore: + description: FileStore defines the file store configuration for Mattermost. + properties: + external: + description: Defines the configuration of an external file store. + properties: + bucket: + description: Set to the bucket name of your external MinIO + or S3. 
+ type: string + secret: + description: 'Optionally enter the name of already existing + secret. Secret should have two values: "accesskey" and "secretkey".' + type: string + url: + description: Set to use an external MinIO deployment or S3. + type: string + type: object + operatorManaged: + description: Defines the configuration of file store managed by + Kubernetes operator. + properties: + replicas: + description: 'Defines the number of Minio replicas. Supply + 1 to run Minio in standalone mode with no redundancy. Supply + 4 or more to run Minio in distributed mode. Note that it + is not possible to upgrade Minio from standalone to distributed + mode. Setting this will override the number of replicas + set by ''Size''. More info: https://docs.min.io/docs/distributed-minio-quickstart-guide.html' + format: int32 + type: integer + resources: + description: Defines the resource requests and limits for + the Minio pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storageSize: + description: Defines the storage size for Minio. ie 50Gi + pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + type: string + type: object + type: object + image: + description: Image defines the Mattermost Docker image. + type: string + imagePullPolicy: + description: Specify Mattermost deployment pull policy. + type: string + imagePullSecrets: + description: Specify Mattermost image pull secrets. + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + ingressAnnotations: + additionalProperties: + type: string + type: object + ingressName: + description: IngressName defines the name to be used when creating + the ingress rules + type: string + licenseSecret: + description: LicenseSecret is the name of the secret containing a + Mattermost license. + type: string + mattermostEnv: + description: Optional environment variables to set in the Mattermost + application pods. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + probes: + description: Probes defines configuration of liveness and readiness + probe for Mattermost pods. These settings generally don't need to + be changed. + properties: + livenessProbe: + description: Defines the probe to check if the application is + up and running. + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + readinessProbe: + description: Defines the probe to check if the application is + ready to accept traffic. + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. 
You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. 
Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + type: object + replicas: + description: Replicas defines the number of replicas to use for the + Mattermost app servers. + format: int32 + type: integer + resourceLabels: + additionalProperties: + type: string + type: object + scheduling: + description: Scheduling defines the configuration related to scheduling + of the Mattermost pods as well as resource constraints. These settings + generally don't need to be changed. + properties: + affinity: + description: If specified, affinity will define the pod's scheduling + constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). 
+ properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from + its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them are + ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. 
for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". 
The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node that + violates one or more of the expressions. The node that + is most preferred is the one with the greatest sum of + weights, i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + anti-affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. 
If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod + label update), the system may or may not try to eventually + evict the pod from its node. 
When there are multiple + elements, the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for + the pod to fit on a node. Selector which must match a node''s + labels for the pod to be scheduled on that node. More info: + https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + resources: + description: Defines the resource requests and limits for the + Mattermost app server pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + type: object + serviceAnnotations: + additionalProperties: + type: string + type: object + size: + description: 'Size defines the size of the Mattermost. This is typically + specified in number of users. This will override replica and resource + requests/limits appropriately for the provided number of users. + This is a write-only field - its value is erased after setting appropriate + values of resources. Accepted values are: 100users, 1000users, 5000users, + 10000users, and 250000users. If replicas and resource requests/limits + are not specified, and Size is not provided the configuration for + 5000users will be applied. Setting ''Replicas'', ''Scheduling.Resources'', + ''FileStore.Replicas'', ''FileStore.Resource'', ''Database.Replicas'', + or ''Database.Resources'' will override the values set by Size. + Setting new Size will override previous values regardless if set + by Size or manually.' + type: string + useIngressTLS: + type: boolean + useServiceLoadBalancer: + type: boolean + version: + description: Version defines the Mattermost Docker image version. + type: string + volumeMounts: + description: Defines additional volumeMounts to add to Mattermost + application pods. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When + not set, MountPropagationNone is used. This field is beta + in 1.10. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the + container's volume should be mounted. Behaves similarly to + SubPath but environment variable references $(VAR_NAME) are + expanded using the container's environment. Defaults to "" + (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows for mounting volumes from various sources + into the Mattermost application pods. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". 
Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. 
+ type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "Ephemeral represents a volume that is handled + by a cluster storage driver (Alpha feature). 
The volume's + lifecycle is tied to the pod that defines it - it will be + created before the pod starts, and deleted when the pod is + removed. \n Use this if: a) the volume is only needed while + the pod runs, b) features of normal volumes like restoring + from snapshot or capacity tracking are needed, c) the storage + driver is specified through a storage class, and d) the storage + driver supports dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. 
+ Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) * An + existing custom resource that implements data + population (Alpha) In order to use custom resource + types that implement data population, the AnyVolumeDataSource + feature gate must be enabled. If the provisioner + or an external controller can support the specified + data source, it will create a new volume based + on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. 
Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. 
+ format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits used to set permissions on created + files by default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal values + for mode bits. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. 
If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. 
Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' 
+ format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. 
+ type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - ingressName + type: object + status: + description: MattermostStatus defines the observed state of Mattermost + properties: + endpoint: + description: The endpoint to access the Mattermost instance + type: string + image: + description: The image running on the pods in the Mattermost instance + type: string + replicas: + description: Total number of non-terminated pods targeted by this + Mattermost deployment + format: int32 + type: integer + state: + description: Represents the running state of the Mattermost instance + type: string + updatedReplicas: + description: Total number of non-terminated pods targeted by this + Mattermost deployment that are running with the desired image. + format: int32 + type: integer + version: + description: The version currently running in the Mattermost instance + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mattermost-operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: mattermost-operator +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - '*' +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - networking.k8s.io 
+ resources: + - ingresses + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - get + - create + - list + - delete + - watch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - apps + resourceNames: + - mattermost-operator + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - mattermost.com + resources: + - '*' + - clusterinstallations + - mattermostrestoredbs + verbs: + - '*' +- apiGroups: + - installation.mattermost.com + resources: + - '*' + verbs: + - '*' +- apiGroups: + - mysql.presslabs.org + resources: + - mysqlbackups + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - mysql.presslabs.org + resources: + - mysqlclusters + - mysqlclusters/status + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - miniocontroller.min.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - minio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - create + - list + - delete + - watch + - update +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + - certificatesigningrequests/approval + - certificatesigningrequests/status + verbs: + - update + - create + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: mattermost-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mattermost-operator +subjects: +- kind: ServiceAccount + name: mattermost-operator + namespace: mattermost-operator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mattermost-operator +spec: + replicas: 1 + selector: + matchLabels: + name: mattermost-operator + template: + metadata: + labels: + name: mattermost-operator + spec: + containers: + - args: + - --enable-leader-election + 
command: + - /mattermost-operator + env: + - name: MAX_RECONCILING_INSTALLATIONS + value: "20" + - name: REQUEUE_ON_LIMIT_DELAY + value: 20s + image: mattermost/mattermost-operator:v1.14.0 + imagePullPolicy: IfNotPresent + name: mattermost-operator + serviceAccountName: mattermost-operator +--- diff --git a/ii/cluster/mattermost-postgresql.yaml b/ii/cluster/mattermost-postgresql.yaml new file mode 100644 index 0000000..a8fd072 --- /dev/null +++ b/ii/cluster/mattermost-postgresql.yaml @@ -0,0 +1,30 @@ +# Install Postgresql-HA +# #+name: postgres-database + +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: mattermost-db +spec: + enableConnectionPooler: true + connectionPooler: + mode: session + resources: + requests: + cpu: 250m + memory: 100Mi + limits: + cpu: "1" + memory: 100Mi + teamId: "mattermost" + volume: + size: 50Gi + numberOfInstances: 3 + users: + mattermost: # database owner + - superuser + - createdb + databases: + mattermost: mattermost # dbname: owner + postgresql: + version: "13" diff --git a/ii/cluster/mattermost.yaml b/ii/cluster/mattermost.yaml new file mode 100644 index 0000000..7b7515a --- /dev/null +++ b/ii/cluster/mattermost.yaml @@ -0,0 +1,131 @@ +# Operator configuration +# #+name: mattermost-cluster-definition + +apiVersion: installation.mattermost.com/v1beta1 +kind: Mattermost +metadata: + name: mattermost # Name of your cluster as shown in Kubernetes. 
+ namespace: mattermost +spec: + database: + external: + secret: mattermost-database + elasticSearch: {} + fileStore: + operatorManaged: + replicas: 1 + resources: + requests: + cpu: 150m + memory: 512Mi + storageSize: 50Gi + image: mattermost/mattermost-enterprise-edition + imagePullPolicy: IfNotPresent + ingressAnnotations: + kubernetes.io/ingress.class: nginx + ingressName: mattermost.ii.coop + probes: + livenessProbe: {} + readinessProbe: {} + replicas: 1 + scheduling: + affinity: {} + resources: + limits: + cpu: "2" + memory: 4Gi + requests: + cpu: 150m + memory: 256Mi + useIngressTLS: true + version: "5.39" + mattermostEnv: + - name: MM_GITLABSETTINGS_ENABLE + value: "true" + - name: MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL + value: "true" + - name: MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME + value: "true" + - name: MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS + value: "true" + - name: MM_EMAILSETTINGS_REQUIREEMAILVERIFICATION + value: "false" + - name: MM_EMAILSETTINGS_FEEDBACKEMAIL + value: "mattermost@ii.coop" + - name: MM_EMAILSETTINGS_FEEDBACKORGANIZATION + value: "ii.coop" + - name: MM_EMAILSETTINGS_SENDEMAILNOTIFICATIONS + value: "true" + - name: MM_EMAILSETTINGS_SENDPUSHNOTIFICATIONS + value: "true" + - name: MM_EMAILSETTINGS_PUSHNOTIFICATIONSERVER + value: "https://push-test.mattermost.com" + - name: MM_EMAILSETTINGS_PUSHNOTIFICATIONCONTENTS + value: "full" + - name: MM_EMAILSETTINGS_ENABLESMTPAUTH + value: "true" + - name: MM_GITLABSETTINGS_ID + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_ID + - name: MM_GITLABSETTINGS_SECRET + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_SECRET + - name: MM_GITLABSETTINGS_AUTHENDPOINT + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_AUTHENDPOINT + - name: MM_GITLABSETTINGS_TOKENENDPOINT + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_TOKENENDPOINT + - name: 
MM_GITLABSETTINGS_USERAPIENDPOINT + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_GITLABSETTINGS_USERAPIENDPOINT + - name: MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_ENABLESIGNINWITHEMAIL + - name: MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_ENABLESIGNINWITHUSERNAME + - name: MM_EMAILSETTINGS_FEEDBACKNAME + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_FEEDBACKNAME + - name: MM_EMAILSETTINGS_SMTPUSERNAME + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_SMTPUSERNAME + - name: MM_EMAILSETTINGS_SMTPPASSWORD + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_SMTPPASSWORD + - name: MM_EMAILSETTINGS_SMTPSERVER + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_SMTPSERVER + - name: MM_EMAILSETTINGS_SMTPPORT + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_SMTPPORT + - name: MM_EMAILSETTINGS_CONNECTIONSECURITY + valueFrom: + secretKeyRef: + name: mattermost-user-config + key: MM_EMAILSETTINGS_CONNECTIONSECURITY diff --git a/ii/cluster/metallb-namespace.yaml b/ii/cluster/metallb-namespace.yaml new file mode 100644 index 0000000..003269b --- /dev/null +++ b/ii/cluster/metallb-namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: metallb-system + labels: + app: metallb diff --git a/ii/cluster/metallb-system-config.yaml b/ii/cluster/metallb-system-config.yaml new file mode 100644 index 0000000..6eceda4 --- /dev/null +++ b/ii/cluster/metallb-system-config.yaml @@ -0,0 +1,16 @@ + + +# #+NAME: MetalLB system config + +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - 
${LOAD_BALANCER_IP}/32 diff --git a/ii/cluster/metallb.yaml b/ii/cluster/metallb.yaml new file mode 100644 index 0000000..6ed6f10 --- /dev/null +++ b/ii/cluster/metallb.yaml @@ -0,0 +1,443 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +spec: + allowPrivilegeEscalation: false + allowedCapabilities: [] + allowedHostPaths: [] + defaultAddCapabilities: [] + defaultAllowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - secret + - emptyDir +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_RAW + allowedHostPaths: [] + defaultAddCapabilities: [] + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + hostIPC: false + hostNetwork: true + hostPID: false + hostPorts: + - max: 7472 + min: 7472 + - max: 7946 + min: 7946 + privileged: true + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - secret + - emptyDir +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:controller +rules: +- 
apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - services/status + verbs: + - update +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - controller + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:speaker +rules: +- apiGroups: + - '' + resources: + - services + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - speaker + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - pods + verbs: + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - secrets + verbs: + - create +- apiGroups: + - '' + resources: + - secrets + resourceNames: + - memberlist + verbs: + - list +- apiGroups: + - apps + resources: + - deployments + resourceNames: + - controller + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:controller +subjects: +- 
kind: ServiceAccount + name: controller + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:speaker +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:speaker +subjects: +- kind: ServiceAccount + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: config-watcher +subjects: +- kind: ServiceAccount + name: controller +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-lister +subjects: +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: controller +subjects: +- kind: ServiceAccount + name: controller +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: metallb + component: speaker + name: speaker + namespace: metallb-system +spec: + selector: + matchLabels: + app: metallb + component: speaker + template: + metadata: + annotations: + prometheus.io/port: '7472' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: speaker + spec: + containers: + - args: + - --port=7472 + - --config=config + env: + - name: METALLB_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: METALLB_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: METALLB_ML_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + # needed when another software is also using 
memberlist / port 7946 + # when changing this default you also need to update the container ports definition + # and the PodSecurityPolicy hostPorts definition + #- name: METALLB_ML_BIND_PORT + # value: "7946" + - name: METALLB_ML_LABELS + value: "app=metallb,component=speaker" + - name: METALLB_ML_SECRET_KEY + valueFrom: + secretKeyRef: + name: memberlist + key: secretkey + image: quay.io/metallb/speaker:v0.10.2 + name: speaker + ports: + - containerPort: 7472 + name: monitoring + - containerPort: 7946 + name: memberlist-tcp + - containerPort: 7946 + name: memberlist-udp + protocol: UDP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_RAW + drop: + - ALL + readOnlyRootFilesystem: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: speaker + terminationGracePeriodSeconds: 2 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metallb + component: controller + name: controller + namespace: metallb-system +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app: metallb + component: controller + template: + metadata: + annotations: + prometheus.io/port: '7472' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: controller + spec: + containers: + - args: + - --port=7472 + - --config=config + env: + - name: METALLB_ML_SECRET_NAME + value: memberlist + - name: METALLB_DEPLOYMENT + value: controller + image: quay.io/metallb/controller:v0.10.2 + name: controller + ports: + - containerPort: 7472 + name: monitoring + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: true + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + runAsUser: 65534 + serviceAccountName: controller + terminationGracePeriodSeconds: 0 diff --git a/ii/cluster/metrics-server.yaml 
b/ii/cluster/metrics-server.yaml new file mode 100644 index 0000000..d881419 --- /dev/null +++ b/ii/cluster/metrics-server.yaml @@ -0,0 +1,20 @@ + + +# #+NAME: Metrics-Server + +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: metrics-server + namespace: kube-system +spec: + releaseName: metrics-server + chart: + repository: https://olemarkus.github.io/metrics-server + name: metrics-server + version: 2.11.2 + values: + args: + - --logtostderr + - --kubelet-preferred-address-types=InternalIP + - --kubelet-insecure-tls diff --git a/ii/cluster/minio-operator.yaml b/ii/cluster/minio-operator.yaml new file mode 100644 index 0000000..3e7df27 --- /dev/null +++ b/ii/cluster/minio-operator.yaml @@ -0,0 +1,131 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: minioinstances.miniocontroller.min.io +spec: + group: miniocontroller.min.io + version: v1beta1 + scope: Namespaced + names: + kind: MinIOInstance + singular: minioinstance + plural: minioinstances + preserveUnknownFields: true + validation: + # openAPIV3Schema is the schema for validating custom objects. 
+ # Refer https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#specifying-a-structural-schema + # for more details + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + replicas: + type: integer + minimum: 1 + maximum: 32 + version: + type: string + mountpath: + type: string + subpath: + type: string + additionalPrinterColumns: + - name: Replicas + type: integer + JSONPath: ".spec.replicas" +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: minio-operator-role +rules: +- apiGroups: + - "" + resources: + - namespaces + - secrets + - pods + - services + - events + verbs: + - get + - watch + - create + - list + - patch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - create + - list + - patch + - watch + - update +- apiGroups: + - "certificates.k8s.io" + resources: + - "certificatesigningrequests" + - "certificatesigningrequests/approval" + - "certificatesigningrequests/status" + verbs: + - update + - create + - get +- apiGroups: + - miniocontroller.min.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - min.io + resources: + - "*" + verbs: + - "*" +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: minio-operator-sa + namespace: minio-operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: minio-operator-binding + namespace: minio-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: minio-operator-role +subjects: +- kind: ServiceAccount + name: minio-operator-sa + namespace: minio-operator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio-operator + namespace: minio-operator +spec: + replicas: 1 + selector: + matchLabels: + name: minio-operator + template: + metadata: + labels: + name: minio-operator + spec: + serviceAccountName: minio-operator-sa + containers: + - name: minio-operator + image: 
minio/k8s-operator:1.0.7 + imagePullPolicy: IfNotPresent diff --git a/ii/cluster/nginx-ingress.yaml b/ii/cluster/nginx-ingress.yaml new file mode 100644 index 0000000..31c6476 --- /dev/null +++ b/ii/cluster/nginx-ingress.yaml @@ -0,0 +1,45 @@ + + +# #+NAME: nginx-ingress + +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: nginx-ingress + namespace: nginx-ingress +spec: + releaseName: nginx-ingress + chart: + repository: https://kubernetes.github.io/ingress-nginx + name: ingress-nginx + version: 4.0.3 + values: + controller: + ingressClassResource: + default: true + service: + externalTrafficPolicy: Local + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + publishService: + enabled: true + autoscaling: + enabled: true + minReplicas: 3 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + minAvailable: 3 + metrics: + enabled: true + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - ingress-nginx + topologyKey: "kubernetes.io/hostname" diff --git a/ii/cluster/postgres-operator-configmap.yaml b/ii/cluster/postgres-operator-configmap.yaml new file mode 100644 index 0000000..309180c --- /dev/null +++ b/ii/cluster/postgres-operator-configmap.yaml @@ -0,0 +1,140 @@ + + +# #+NAME: Postgres operator + +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-operator +data: + # additional_pod_capabilities: "SYS_NICE" + # additional_secret_mount: "some-secret-name" + # additional_secret_mount_path: "/some/dir" + api_port: "8080" + aws_region: eu-central-1 + cluster_domain: cluster.local + cluster_history_entries: "1000" + cluster_labels: application:spilo + cluster_name_label: cluster-name + # connection_pooler_default_cpu_limit: "1" + # connection_pooler_default_cpu_request: "500m" + # connection_pooler_default_memory_limit: 100Mi + # connection_pooler_default_memory_request: 
100Mi + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-18" + # connection_pooler_max_db_connections: 60 + # connection_pooler_mode: "transaction" + # connection_pooler_number_of_instances: 2 + # connection_pooler_schema: "pooler" + # connection_pooler_user: "pooler" + # custom_service_annotations: "keyx:valuez,keya:valuea" + # custom_pod_annotations: "keya:valuea,keyb:valueb" + db_hosted_zone: db.example.com + debug_logging: "true" + # default_cpu_limit: "1" + # default_cpu_request: 100m + # default_memory_limit: 500Mi + # default_memory_request: 100Mi + # delete_annotation_date_key: delete-date + # delete_annotation_name_key: delete-clustername + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1 + # downscaler_annotations: "deployment-time,downscaler/*" + # enable_admin_role_for_users: "true" + # enable_crd_validation: "true" + # enable_cross_namespace_secret: "false" + # enable_database_access: "true" + enable_ebs_gp3_migration: "false" + # enable_ebs_gp3_migration_max_size: "1000" + # enable_init_containers: "true" + # enable_lazy_spilo_upgrade: "false" + enable_master_load_balancer: "false" + enable_pgversion_env_var: "true" + enable_pod_antiaffinity: "true" + enable_pod_disruption_budget: "true" + # enable_postgres_team_crd: "false" + # enable_postgres_team_crd_superusers: "false" + enable_replica_load_balancer: "false" + # enable_shm_volume: "true" + # enable_sidecars: "true" + enable_spilo_wal_path_compat: "true" + enable_team_member_deprecation: "false" + # enable_team_superuser: "false" + enable_teams_api: "false" + # etcd_host: "" + external_traffic_policy: "Cluster" + # gcp_credentials: "" + # kubernetes_use_configmaps: "false" + # infrastructure_roles_secret_name: "postgresql-infrastructure-roles" + # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole" + # inherited_annotations: owned-by + # inherited_labels: application,environment + # kube_iam_role: "" 
+ # log_s3_bucket: "" + logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0" + # logical_backup_google_application_credentials: "" + logical_backup_job_prefix: "logical-backup-" + logical_backup_provider: "s3" + # logical_backup_s3_access_key_id: "" + logical_backup_s3_bucket: "my-bucket-url" + # logical_backup_s3_region: "" + # logical_backup_s3_endpoint: "" + # logical_backup_s3_secret_access_key: "" + logical_backup_s3_sse: "AES256" + logical_backup_schedule: "30 00 * * *" + major_version_upgrade_mode: "manual" + master_dns_name_format: "{cluster}.{team}.{hostedzone}" + # master_pod_move_timeout: 20m + # max_instances: "-1" + # min_instances: "-1" + # min_cpu_limit: 250m + # min_memory_limit: 250Mi + # minimal_major_version: "9.5" + # node_readiness_label: "" + # oauth_token_secret_name: postgresql-operator + # pam_configuration: | + # https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees + # pam_role_name: zalandos + pdb_name_format: "postgres-{cluster}-pdb" + pod_antiaffinity_topology_key: "kubernetes.io/hostname" + pod_deletion_wait_timeout: 10m + # pod_environment_configmap: "default/my-custom-config" + # pod_environment_secret: "my-custom-secret" + pod_label_wait_timeout: 10m + pod_management_policy: "ordered_ready" + # pod_priority_class_name: "postgres-pod-priority" + pod_role_label: spilo-role + # pod_service_account_definition: "" + pod_service_account_name: "postgres-pod" + # pod_service_account_role_binding_definition: "" + pod_terminate_grace_period: 5m + # postgres_superuser_teams: "postgres_superusers" + # protected_role_names: "admin" + ready_wait_interval: 3s + ready_wait_timeout: 30s + repair_period: 5m + replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" + replication_username: standby + resource_check_interval: 3s + resource_check_timeout: 10m + resync_period: 30m + ring_log_lines: "100" + role_deletion_suffix: "_deleted" + secret_name_template: 
"{username}.{cluster}.credentials" + # sidecar_docker_images: "" + # set_memory_request_to_limit: "false" + spilo_allow_privilege_escalation: "true" + # spilo_runasuser: 101 + # spilo_runasgroup: 103 + # spilo_fsgroup: 103 + spilo_privileged: "false" + storage_resize_mode: "pvc" + super_username: postgres + # target_major_version: "13" + # team_admin_role: "admin" + # team_api_role_configuration: "log_statement:all" + # teams_api_url: http://fake-teams-api.default.svc.cluster.local + # toleration: "" + # wal_az_storage_account: "" + # wal_gs_bucket: "" + # wal_s3_bucket: "" + watched_namespace: "*" # listen to all namespaces + workers: "8" diff --git a/ii/cluster/powerdns.yaml b/ii/cluster/powerdns.yaml new file mode 100644 index 0000000..b92b0ac --- /dev/null +++ b/ii/cluster/powerdns.yaml @@ -0,0 +1,51 @@ + + +# #+NAME: PowerDNS + +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: powerdns +spec: + releaseName: powerdns + chart: + git: https://github.com/sharingio/helm-charts + ref: master + path: charts/powerdns + values: + domain: gitlab-staging.ii.coop + default_soa_name: gitlab-staging.ii.coop + apikey: pairingissharing + powerdns: + default_ttl: 3600 + soa_minimum_ttl: 3600 + domain: gitlab-staging.ii.coop + default_soa_name: gitlab-staging.ii.coop + mysql_host: powerdns-service-db + mysql_user: powerdns + extraEnv: + - name: PDNS_dnsupdate + value: "yes" + - name: PDNS_allow_dnsupdate_from + value: "192.168.0.0/24" + service: + dns: + tcp: + enabled: true + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + externalIPs: + - ${LOAD_BALANCER_IP} + udp: + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress + externalIPs: + - ${LOAD_BALANCER_IP} + mariadb: + mysql_pass: pairingissharing + mysql_rootpass: pairingissharing + admin: + enabled: false + ingress: + enabled: false + secret: pairingissharing diff --git a/ii/cluster/rook-ceph-cluster.yaml b/ii/cluster/rook-ceph-cluster.yaml new file mode 100644 index 
0000000..c5a2139 --- /dev/null +++ b/ii/cluster/rook-ceph-cluster.yaml @@ -0,0 +1,249 @@ +################################################################################################################# +# Define the settings for the rook-ceph cluster with common settings for a production cluster. +# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required +# in this example. See the documentation for more details on storage settings available. + +# For example, to create the cluster: +# kubectl create -f crds.yaml -f common.yaml -f operator.yaml +# kubectl create -f cluster.yaml +################################################################################################################# + +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: rook-ceph # namespace:cluster +spec: + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v13 is mimic, v14 is nautilus, and v15 is octopus. + # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. + # If you want to be more precise, you can always use a timestamp tag such ceph/ceph:v15.2.8-20201217 + # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities + image: ceph/ceph:v16.2.5 + # Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported. + # Future versions such as `pacific` would require this to be set to `true`. + # Do not set to true in production. + allowUnsupported: false + # The path on the host where configuration files will be persisted. Must be specified. 
+ # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. + dataDirHostPath: /var/lib/rook + # Whether or not upgrade should continue even if a check fails + # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise + # Use at your OWN risk + # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades + skipUpgradeChecks: false + # Whether or not continue if PGs are not clean during an upgrade + continueUpgradeAfterChecksEvenIfNotHealthy: false + mon: + # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. + count: 3 + # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. + # Mons should only be allowed on the same node for test environments where data loss is acceptable. + allowMultiplePerNode: false + mgr: + modules: + # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules + # are already enabled by other settings in the cluster CR. + - name: pg_autoscaler + enabled: true + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # serve the dashboard using SSL + ssl: true + # enable prometheus alerting for cluster + monitoring: + # requires Prometheus to be pre-installed + enabled: false + # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used. 
+ # Recommended: + # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty. + # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus + # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions. + rulesNamespace: rook-ceph + network: + # enable host networking + #provider: host + # EXPERIMENTAL: enable the Multus network provider + #provider: multus + #selectors: + # The selector keys are required to be `public` and `cluster`. + # Based on the configuration, the operator will do the following: + # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface + # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' + # + # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus + # + #public: public-conf --> NetworkAttachmentDefinition object name in Multus + #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 + #ipFamily: "IPv6" + # enable the crash collector for ceph daemon crash collection + crashCollector: + disable: false + # enable log collector, daemons will log on files and rotate + # logCollector: + # enabled: true + # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. + # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. + cleanupPolicy: + # Since cluster cleanup is destructive to data, confirmation is required. 
+ # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". + # This value should only be set when the cluster is about to be deleted. After the confirmation is set, + # Rook will immediately stop configuring the cluster and only wait for the delete command. + # If the empty string is set, Rook will not destroy any data on hosts during uninstall. + confirmation: "" + # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion + sanitizeDisks: + # method indicates if the entire disk should be sanitized or simply ceph's metadata + # in both case, re-install is possible + # possible choices are 'complete' or 'quick' (default) + method: quick + # dataSource indicate where to get random bytes from to write on the disk + # possible choices are 'zero' (default) or 'random' + # using random sources will consume entropy from the system and will take much more time then the zero source + dataSource: zero + # iteration overwrite N times instead of the default (1) + # takes an integer value + iteration: 1 + # allowUninstallWithVolumes defines how the uninstall should be performed + # If set to true, cephCluster deletion does not wait for the PVs to be deleted. + allowUninstallWithVolumes: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. 
+# placement: +# all: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: role +# operator: In +# values: +# - storage-node +# podAffinity: +# podAntiAffinity: +# topologySpreadConstraints: +# tolerations: +# - key: storage-node +# operator: Exists +# The above placement information can also be specified for mon, osd, and mgr components +# mon: +# Monitor deployments may contain an anti-affinity rule for avoiding monitor +# collocation on the same node. This is a required rule when host network is used +# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a +# preferred rule with weight: 50. +# osd: +# mgr: +# cleanup: + annotations: +# all: +# mon: +# osd: +# cleanup: +# prepareosd: +# If no mgr annotations are set, prometheus scrape annotations will be set by default. +# mgr: + labels: +# all: +# mon: +# osd: +# cleanup: +# mgr: +# prepareosd: + resources: +# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory +# mgr: +# limits: +# cpu: "500m" +# memory: "1024Mi" +# requests: +# cpu: "500m" +# memory: "1024Mi" +# The above example requests/limits can also be added to the mon and osd components +# mon: +# osd: +# prepareosd: +# crashcollector: +# logcollector: +# cleanup: + # The option to automatically remove OSDs that are out and are safe to destroy. + removeOSDsIfOutAndSafeToRemove: false +# priorityClassNames: +# all: rook-ceph-default-priority-class +# mon: rook-ceph-mon-priority-class +# osd: rook-ceph-osd-priority-class +# mgr: rook-ceph-mgr-priority-class + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: false + deviceFilter: "^sd[c-f]" + # config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. 
+ # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" +# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named +# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. +# nodes: +# - name: "172.17.4.201" +# devices: # specific devices to use for storage can be specified for each node +# - name: "sdb" +# - name: "nvme01" # multiple osds can be created on high performance devices +# config: +# osdsPerDevice: "5" +# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths +# config: # configuration can be specified at the node level which overrides the cluster level config +# storeType: filestore +# - name: "172.17.4.301" +# deviceFilter: "^sd." + # The section for configuring management of daemon disruptions during upgrade or fencing. + disruptionManagement: + # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # block eviction of OSDs by default and unblock them safely when drains are detected. + managePodBudgets: false + # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the + # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. 
+ osdMaintenanceTimeout: 30 + # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. + # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. + # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. + pgHealthCheckTimeout: 0 + # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. + # Only available on OpenShift. + manageMachineDisruptionBudgets: false + # Namespace in which to watch for the MachineDisruptionBudgets. + machineDisruptionBudgetNamespace: openshift-machine-api + + # healthChecks + # Valid values for daemons are 'mon', 'osd', 'status' + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe, it works for all mon,mgr,osd daemons + livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false diff --git a/ii/cluster/rook-ceph-dashboard.yaml b/ii/cluster/rook-ceph-dashboard.yaml new file mode 100644 index 0000000..4b94a37 --- /dev/null +++ b/ii/cluster/rook-ceph-dashboard.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: rook-ceph-mgr-dashboard-external-https + namespace: rook-ceph + labels: + app: rook-ceph-mgr + rook_cluster: rook-ceph +spec: + ports: + - name: dashboard + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + app: rook-ceph-mgr + rook_cluster: rook-ceph + sessionAffinity: None + type: NodePort diff --git a/ii/cluster/rook-ceph-pool-storageclass.yaml b/ii/cluster/rook-ceph-pool-storageclass.yaml new file mode 100644 index 0000000..02e3fe2 --- /dev/null +++ b/ii/cluster/rook-ceph-pool-storageclass.yaml @@ -0,0 +1,59 @@ +apiVersion: ceph.rook.io/v1 
+kind: CephBlockPool +metadata: + name: replicapool + namespace: rook-ceph +spec: + failureDomain: host + replicated: + size: 3 +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-ceph-block + annotations: + storageclass.kubernetes.io/is-default-class: "true" +# Change "rook-ceph" provisioner prefix to match the operator namespace if needed +provisioner: rook-ceph.rbd.csi.ceph.com +parameters: + # clusterID is the namespace where the rook cluster is running + clusterID: rook-ceph + # Ceph pool into which the RBD image shall be created + pool: replicapool + + # (optional) mapOptions is a comma-separated list of map options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # mapOptions: lock_on_read,queue_depth=1024 + + # (optional) unmapOptions is a comma-separated list of unmap options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # unmapOptions: force + + # RBD image format. Defaults to "2". + imageFormat: "2" + + # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. + imageFeatures: layering + + # The secrets contain Ceph admin credentials. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will set default as `ext4`. 
Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. + csi.storage.k8s.io/fstype: ext4 + +# Delete the rbd volume when a PVC is deleted +reclaimPolicy: Delete diff --git a/ii/cluster/rook-ceph-pvc-shared-test.yaml b/ii/cluster/rook-ceph-pvc-shared-test.yaml new file mode 100644 index 0000000..7389f4f --- /dev/null +++ b/ii/cluster/rook-ceph-pvc-shared-test.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rook-ceph-pvc-shared-test +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-shared diff --git a/ii/cluster/rook-ceph-pvc-test.yaml b/ii/cluster/rook-ceph-pvc-test.yaml new file mode 100644 index 0000000..86b1e63 --- /dev/null +++ b/ii/cluster/rook-ceph-pvc-test.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rook-ceph-pvc-test +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-block +--- +apiVersion: v1 +kind: Pod +metadata: + name: rook-ceph-pvc-test +spec: + nodeName: ii-coop-control-plane-nbvt9 + containers: + - name: rook-ceph-pvc-test + image: alpine:3.12 + command: + - sleep + - infinity + volumeMounts: + - name: rook-ceph-pvc-test + mountPath: /mnt + volumes: + - name: rook-ceph-pvc-test + persistentVolumeClaim: + claimName: rook-ceph-pvc-test diff --git a/ii/cluster/rook-ceph-shared-pool-storageclass.yaml b/ii/cluster/rook-ceph-shared-pool-storageclass.yaml new file mode 100644 index 0000000..5c6ef50 --- /dev/null +++ b/ii/cluster/rook-ceph-shared-pool-storageclass.yaml @@ -0,0 +1,52 @@ + + +# #+NAME: RWM storageClass + +apiVersion: ceph.rook.io/v1 +kind: CephFilesystem +metadata: + name: rook-ceph-shared + namespace: rook-ceph +spec: + metadataPool: + replicated: + size: 3 + dataPools: + - replicated: + size: 3 + preservePoolsOnDelete: true + metadataServer: + 
activeCount: 1 + activeStandby: true +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-ceph-shared +# Change "rook-ceph" provisioner prefix to match the operator namespace if needed +provisioner: rook-ceph.cephfs.csi.ceph.com +parameters: + # clusterID is the namespace where operator is deployed. + clusterID: rook-ceph + + # CephFS filesystem name into which the volume shall be created + fsName: rook-ceph-shared + + # Ceph pool into which the volume shall be created + # Required for provisionVolume: "true" + pool: rook-ceph-shared-data0 + + # Root path of an existing CephFS volume + # Required for provisionVolume: "false" + # rootPath: /absolute/path + + # The secrets contain Ceph admin credentials. These are generated automatically by the operator + # in the same namespace as the cluster. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + +reclaimPolicy: Delete diff --git a/ii/cluster/rook-ceph-toolbox.yaml b/ii/cluster/rook-ceph-toolbox.yaml new file mode 100644 index 0000000..5985021 --- /dev/null +++ b/ii/cluster/rook-ceph-toolbox.yaml @@ -0,0 +1,59 @@ +# Debug + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: rook-ceph-tools + namespace: rook-ceph + labels: + app: rook-ceph-tools +spec: + selector: + matchLabels: + app: rook-ceph-tools + template: + metadata: + labels: + app: rook-ceph-tools + spec: + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: rook-ceph-tools + image: rook/ceph:v1.7.2 + command: ["/tini"] + args: ["-g", "--", "/usr/local/bin/toolbox.sh"] + imagePullPolicy: IfNotPresent + env: + - name: ROOK_CEPH_USERNAME + valueFrom: + secretKeyRef: + 
name: rook-ceph-mon + key: ceph-username + - name: ROOK_CEPH_SECRET + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: ceph-secret + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - name: mon-endpoint-volume + mountPath: /etc/rook + volumes: + - name: mon-endpoint-volume + configMap: + name: rook-ceph-mon-endpoints + items: + - key: data + path: mon-endpoints + - name: ceph-config + emptyDir: {} + tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 5 diff --git a/ii/cluster/wireguard.yaml b/ii/cluster/wireguard.yaml new file mode 100644 index 0000000..6db695a --- /dev/null +++ b/ii/cluster/wireguard.yaml @@ -0,0 +1,101 @@ +# Configure + +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: wireguard + name: wireguard-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Mi +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: wireguard-config + namespace: wireguard +data: + PUID: "1000" + PGID: "1000" + TZ: "Pacific/Auckland" + SERVERPORT: "51820" + SERVERURL: "wg.ii.coop" + PEERS: "30" + # PEERDNS: "10.43.0.30" + PEERDNS: "auto" + ALLOWEDIPS: "0.0.0.0/0, ::/0" + INTERNAL_SUBNET: "10.13.13.0" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: wireguard + namespace: wireguard + labels: + app: wireguard +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + app: wireguard + template: + metadata: + labels: + app: wireguard + spec: + containers: + - name: wireguard + image: docker.io/linuxserver/wireguard:v1.0.20210424-ls36 + envFrom: + - configMapRef: + name: wireguard-config + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + volumeMounts: + - name: wg-config + mountPath: /config + - name: host-volumes + mountPath: /lib/modules + ports: + - containerPort: 51820 + protocol: UDP + resources: 
+ requests: + memory: "64Mi" + cpu: "10m" + limits: + memory: "128Mi" + cpu: "100m" + volumes: + - name: wg-config + persistentVolumeClaim: + claimName: wireguard-pvc + - name: host-volumes + hostPath: + path: /lib/modules + type: Directory +--- +apiVersion: v1 +kind: Service +metadata: + name: wireguard + namespace: wireguard +spec: + selector: + app: wireguard + ports: + - name: wireguard + port: 51820 + targetPort: 51820 + protocol: UDP + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/ii/community-infra/README.org b/ii/community-infra/README.org new file mode 100644 index 0000000..9c21a7d --- /dev/null +++ b/ii/community-infra/README.org @@ -0,0 +1,878 @@ +#+TITLE: Community Infra + +#+begin_quote +ii's local Bay of Plenty community infrastructure for learning, developing, and pairing with locals. +#+end_quote + +* Plan +Provide local infrastructure, through servers that run Kubernetes; for an automated, reproducible, and accessible way for those in the local community. + +We will begin using Kubeadm for setting up Kubernetes, but will end up using Talos as the way to bring up Kubernetes.
+ +* Network +There are currently three servers installed, their IPs are: +- 10.8.11.201 +- 10.8.11.202 +- 10.8.11.203 + +* Set up Kubernetes +** Prepare +The set of commands to prepare the Ubuntu installs for Kubernetes +#+begin_src shell :tangle ./preKubeadmCommands.sh +#!/bin/bash +KUBERNETES_VERSION='1.21.2' + +PACKAGES=( + apt-transport-https + ca-certificates + cloud-utils + containerd + dnsutils + ebtables + gettext-base + git + jq + kitty-terminfo + prips + socat +) + +pwd +cd $(dirname $0) + +# ensure mounts +sed -ri '/\\sswap\\s/s/^#?/#/' /etc/fstab +swapoff -a +mount -a + +# install required packages +apt-get -y update +DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl software-properties-common +curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - +echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list +apt-get update -y +TRIMMED_KUBERNETES_VERSION=$(echo $KUBERNETES_VERSION | sed 's/\./\\./g' | sed 's/^v//') +RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1) +apt-get install -y ${PACKAGES[*]} \ + kubelet=${RESOLVED_KUBERNETES_VERSION} \ + kubeadm=${RESOLVED_KUBERNETES_VERSION} \ + kubectl=${RESOLVED_KUBERNETES_VERSION} +systemctl daemon-reload + +# configure container runtime +cat </dev/null | openssl dgst -sha256 -hex | sed 's/^.* //') +#+end_src + +#+begin_src tmate :window community-infra +export JOIN_TOKEN=$(ssh root@10.8.11.201 -p 2222 kubeadm token list -o=jsonpath='{.token}') +#+end_src + +** Srv2 +Prepare +#+begin_src tmate :window community-infra +ssh root@10.8.11.202 -p 2222 bash -x ./preKubeadmCommands.sh +#+end_src + +Join +#+begin_src tmate :window community-infra +ssh root@10.8.11.202 -p 2222 kubeadm join 10.8.11.201:6443 \ + --token "${JOIN_TOKEN}" \ + --discovery-token-ca-cert-hash "sha256:${CA_CERT_HASH}" +#+end_src + +** Srv3 +Prepare 
+#+begin_src tmate :window community-infra +ssh root@10.8.11.203 -p 2222 bash -x ./preKubeadmCommands.sh +#+end_src + +Init +#+begin_src tmate :window community-infra +ssh root@10.8.11.203 -p 2222 \ + kubeadm join 10.8.11.201:6443 \ + --token "${JOIN_TOKEN}" \ + --discovery-token-ca-cert-hash "sha256:${CA_CERT_HASH}" +#+end_src + +* Finalising +** Get the Kubeconfig +#+begin_src tmate :window community-infra +ssh root@10.8.11.201 -p 2222 cat /etc/kubernetes/admin.conf > ~/.kube/config-ii-community-infra +#+end_src + +** Use the Kubeconfig +#+begin_src tmate :window community-infra +export KUBECONFIG=~/.kube/config-ii-community-infra +#+end_src + +** Install a CNI +Prepare cilium +#+begin_src shell :results silent +helm repo add cilium https://helm.cilium.io/ +helm template cilium cilium/cilium --version 1.10.4 \ + --namespace kube-system > ./cilium.yaml +#+end_src + +Install cilium +#+begin_src tmate :window community-infra +kubectl apply -f cilium.yaml +#+end_src + +** Untaint master for scheduling +#+begin_src tmate :window community-infra +kubectl taint node --all node-role.kubernetes.io/master- +#+end_src + +* Installing core services +** Rook+Ceph +*** Prepare +#+begin_src tmate :dir . :window community-infra +kubectl create ns rook-ceph --dry-run=client -o yaml | \ + kubectl apply -f - + +curl -s -L -o ./rook-ceph-common.yaml https://github.com/rook/rook/raw/v1.7.2/cluster/examples/kubernetes/ceph/common.yaml +curl -s -L -o ./rook-ceph-crds.yaml https://github.com/rook/rook/raw/v1.7.2/cluster/examples/kubernetes/ceph/crds.yaml +curl -s -L -o ./rook-ceph-operator.yaml https://github.com/rook/rook/raw/v1.7.2/cluster/examples/kubernetes/ceph/operator.yaml +#+end_src + +*** Configure +#+begin_src yaml :tangle ./rook-ceph-cluster.yaml +################################################################################################################# +# Define the settings for the rook-ceph cluster with common settings for a production cluster. 
+# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required +# in this example. See the documentation for more details on storage settings available. + +# For example, to create the cluster: +# kubectl create -f crds.yaml -f common.yaml -f operator.yaml +# kubectl create -f cluster.yaml +################################################################################################################# + +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: rook-ceph # namespace:cluster +spec: + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v13 is mimic, v14 is nautilus, and v15 is octopus. + # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. + # If you want to be more precise, you can always use a timestamp tag such ceph/ceph:v15.2.8-20201217 + # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities + image: ceph/ceph:v16.2.5 + # Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported. + # Future versions such as `pacific` would require this to be set to `true`. + # Do not set to true in production. + allowUnsupported: false + # The path on the host where configuration files will be persisted. Must be specified. + # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 
+ dataDirHostPath: /var/lib/rook + # Whether or not upgrade should continue even if a check fails + # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise + # Use at your OWN risk + # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades + skipUpgradeChecks: false + # Whether or not continue if PGs are not clean during an upgrade + continueUpgradeAfterChecksEvenIfNotHealthy: false + mon: + # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3. + count: 3 + # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. + # Mons should only be allowed on the same node for test environments where data loss is acceptable. + allowMultiplePerNode: false + mgr: + modules: + # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules + # are already enabled by other settings in the cluster CR. + - name: pg_autoscaler + enabled: true + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # serve the dashboard using SSL + ssl: true + # enable prometheus alerting for cluster + monitoring: + # requires Prometheus to be pre-installed + enabled: false + # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used. + # Recommended: + # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty. + # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus + # deployed) to set rulesNamespace for all the clusters. 
Otherwise, you will get duplicate alerts with multiple alert definitions. + rulesNamespace: rook-ceph + network: + # enable host networking + #provider: host + # EXPERIMENTAL: enable the Multus network provider + #provider: multus + #selectors: + # The selector keys are required to be `public` and `cluster`. + # Based on the configuration, the operator will do the following: + # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface + # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' + # + # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus + # + #public: public-conf --> NetworkAttachmentDefinition object name in Multus + #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 + #ipFamily: "IPv6" + # enable the crash collector for ceph daemon crash collection + crashCollector: + disable: false + # enable log collector, daemons will log on files and rotate + # logCollector: + # enabled: true + # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. + # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. + cleanupPolicy: + # Since cluster cleanup is destructive to data, confirmation is required. + # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". + # This value should only be set when the cluster is about to be deleted. After the confirmation is set, + # Rook will immediately stop configuring the cluster and only wait for the delete command. + # If the empty string is set, Rook will not destroy any data on hosts during uninstall. 
+ confirmation: "" + # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion + sanitizeDisks: + # method indicates if the entire disk should be sanitized or simply ceph's metadata + # in both cases, re-install is possible + # possible choices are 'complete' or 'quick' (default) + method: quick + # dataSource indicates where to get random bytes from to write on the disk + # possible choices are 'zero' (default) or 'random' + # using random sources will consume entropy from the system and will take much more time than the zero source + dataSource: zero + # iteration overwrites N times instead of the default (1) + # takes an integer value + iteration: 1 + # allowUninstallWithVolumes defines how the uninstall should be performed + # If set to true, cephCluster deletion does not wait for the PVs to be deleted. + allowUninstallWithVolumes: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. +# placement: +# all: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: role +# operator: In +# values: +# - storage-node +# podAffinity: +# podAntiAffinity: +# topologySpreadConstraints: +# tolerations: +# - key: storage-node +# operator: Exists +# The above placement information can also be specified for mon, osd, and mgr components +# mon: +# Monitor deployments may contain an anti-affinity rule for avoiding monitor +# collocation on the same node. This is a required rule when host network is used +# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a +# preferred rule with weight: 50. 
+# osd: +# mgr: +# cleanup: + annotations: +# all: +# mon: +# osd: +# cleanup: +# prepareosd: +# If no mgr annotations are set, prometheus scrape annotations will be set by default. +# mgr: + labels: +# all: +# mon: +# osd: +# cleanup: +# mgr: +# prepareosd: + resources: +# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory +# mgr: +# limits: +# cpu: "500m" +# memory: "1024Mi" +# requests: +# cpu: "500m" +# memory: "1024Mi" +# The above example requests/limits can also be added to the mon and osd components +# mon: +# osd: +# prepareosd: +# crashcollector: +# logcollector: +# cleanup: + # The option to automatically remove OSDs that are out and are safe to destroy. + removeOSDsIfOutAndSafeToRemove: false +# priorityClassNames: +# all: rook-ceph-default-priority-class +# mon: rook-ceph-mon-priority-class +# osd: rook-ceph-osd-priority-class +# mgr: rook-ceph-mgr-priority-class + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: false + deviceFilter: "^sd[bdef]" + # config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. + # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" +# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named +# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. 
+# nodes: +# - name: "172.17.4.201" +# devices: # specific devices to use for storage can be specified for each node +# - name: "sdb" +# - name: "nvme01" # multiple osds can be created on high performance devices +# config: +# osdsPerDevice: "5" +# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths +# config: # configuration can be specified at the node level which overrides the cluster level config +# storeType: filestore +# - name: "172.17.4.301" +# deviceFilter: "^sd." + # The section for configuring management of daemon disruptions during upgrade or fencing. + disruptionManagement: + # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # block eviction of OSDs by default and unblock them safely when drains are detected. + managePodBudgets: false + # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the + # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. + osdMaintenanceTimeout: 30 + # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. + # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. + # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. + pgHealthCheckTimeout: 0 + # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. + # Only available on OpenShift. 
+ manageMachineDisruptionBudgets: false + # Namespace in which to watch for the MachineDisruptionBudgets. + machineDisruptionBudgetNamespace: openshift-machine-api + + # healthChecks + # Valid values for daemons are 'mon', 'osd', 'status' + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe, it works for all mon,mgr,osd daemons + livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false +#+end_src +#+begin_src yaml :tangle ./rook-ceph-pool-storageclass.yaml +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: replicapool + namespace: rook-ceph +spec: + failureDomain: host + replicated: + size: 3 +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-ceph-block + annotations: + storageclass.kubernetes.io/is-default-class: "true" +# Change "rook-ceph" provisioner prefix to match the operator namespace if needed +provisioner: rook-ceph.rbd.csi.ceph.com +parameters: + # clusterID is the namespace where the rook cluster is running + clusterID: rook-ceph + # Ceph pool into which the RBD image shall be created + pool: replicapool + + # (optional) mapOptions is a comma-separated list of map options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # mapOptions: lock_on_read,queue_depth=1024 + + # (optional) unmapOptions is a comma-separated list of unmap options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # unmapOptions: force + + # RBD image format. Defaults to "2". + imageFormat: "2" + + # RBD image features. Available for imageFormat: "2". 
CSI RBD currently supports only `layering` feature. + imageFeatures: layering + + # The secrets contain Ceph admin credentials. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. + csi.storage.k8s.io/fstype: ext4 + +# Delete the rbd volume when a PVC is deleted +reclaimPolicy: Delete +#+end_src +#+begin_src yaml :tangle ./rook-ceph-dashboard.yaml +apiVersion: v1 +kind: Service +metadata: + name: rook-ceph-mgr-dashboard-external-https + namespace: rook-ceph + labels: + app: rook-ceph-mgr + rook_cluster: rook-ceph +spec: + ports: + - name: dashboard + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + app: rook-ceph-mgr + rook_cluster: rook-ceph + sessionAffinity: None + type: NodePort +#+end_src + +*** Install +Install the Operator +#+begin_src tmate :dir . :window community-infra +kubectl apply -f ./rook-ceph-crds.yaml -f ./rook-ceph-common.yaml -f ./rook-ceph-operator.yaml +#+end_src + +Create a cluster +#+begin_src tmate :dir . :window community-infra +kubectl apply -f ./rook-ceph-cluster.yaml +#+end_src + +Expose the cluster as a StorageClass +#+begin_src tmate :dir . :window community-infra +kubectl apply -f ./rook-ceph-pool-storageclass.yaml +#+end_src + +Create an NodePort Service for the dashboard +#+begin_src tmate :dir . 
:window community-infra +kubectl apply -f ./rook-ceph-dashboard.yaml +#+end_src + +Show the credentials and access point for the dashboard +#+begin_src tmate :dir . :window community-infra +echo https://$(kubectl get node -o wide $(kubectl -n rook-ceph get pod -o wide | grep mgr | awk '{print $7}') | awk '{print $6}' | tail -1):$(kubectl -n rook-ceph get svc rook-ceph-mgr-dashboard-external-https -o=jsonpath='{.spec.ports[0].nodePort}') +echo admin :: $(kubectl -n rook-ceph get secrets rook-ceph-dashboard-password -o=jsonpath='{.data.password}' | base64 -d) +#+end_src + +*** Debug +Logs for rook-ceph-operator +#+begin_src tmate :dir . :window community-infra +kubectl -n rook-ceph logs -l app=rook-ceph-operator -f --tail=100 +#+end_src + +#+begin_src yaml :tangle ./rook-ceph-toolbox.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: rook-ceph-tools + namespace: rook-ceph + labels: + app: rook-ceph-tools +spec: + selector: + matchLabels: + app: rook-ceph-tools + template: + metadata: + labels: + app: rook-ceph-tools + spec: + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: rook-ceph-tools + image: rook/ceph:v1.7.2 + command: ["/tini"] + args: ["-g", "--", "/usr/local/bin/toolbox.sh"] + imagePullPolicy: IfNotPresent + env: + - name: ROOK_CEPH_USERNAME + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: ceph-username + - name: ROOK_CEPH_SECRET + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: ceph-secret + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - name: mon-endpoint-volume + mountPath: /etc/rook + volumes: + - name: mon-endpoint-volume + configMap: + name: rook-ceph-mon-endpoints + items: + - key: data + path: mon-endpoints + - name: ceph-config + emptyDir: {} + tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 5 +#+end_src + +#+begin_src tmate :dir . 
:window community-infra +kubectl apply -f ./rook-ceph-toolbox.yaml +#+end_src + +#+begin_src tmate :dir . :window community-infra +kubectl -n rook-ceph exec -it daemonset/rook-ceph-tools -- bash +#+end_src + +#+begin_src yaml :tangle ./rook-ceph-pvc-test.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rook-ceph-pvc-test +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-block +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rook-ceph-pvc-test +spec: + strategy: + type: Recreate + selector: + matchLabels: + name: rook-ceph-pvc-test + template: + metadata: + labels: + name: rook-ceph-pvc-test + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: name + operator: In + values: + - rook-ceph-pvc-test + topologyKey: "kubernetes.io/hostname" + containers: + - name: rook-ceph-pvc-test + image: alpine:3.12 + command: + - sleep + - infinity + volumeMounts: + - name: rook-ceph-pvc-test + mountPath: /mnt + volumes: + - name: rook-ceph-pvc-test + persistentVolumeClaim: + claimName: rook-ceph-pvc-test +#+end_src + +Add the test PVC and Deployment +#+begin_src tmate :dir . :window community-infra +kubectl -n default apply -f ./rook-ceph-pvc-test.yaml +#+end_src + +Schedule to a new node +#+begin_src tmate :dir . :window community-infra +kubectl -n default rollout restart deployment rook-ceph-pvc-test +#+end_src + +Get a shell +#+begin_src tmate :dir . :window community-infra +kubectl -n default exec -it deployment/rook-ceph-pvc-test -- sh +#+end_src + +#+begin_src yaml :tangle ./rook-ceph-pvc-shared-test.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rook-ceph-pvc-shared-test +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-shared +#+end_src + +#+begin_src tmate :dir . 
:window community-infra +kubectl -n default apply -f ./rook-ceph-pvc-shared-test.yaml +#+end_src + +#+begin_src tmate :dir . :window community-infra +kubectl -n default describe pvc rook-ceph-pvc-shared-test +#+end_src + +** Helm-Operator +*** Prepare +#+begin_src tmate :window community-infra +kubectl create namespace helm-operator --dry-run=client -o yaml | \ + kubectl apply -f - +#+end_src +*** Install +#+begin_src tmate :window community-infra +kubectl apply \ + -f https://github.com/sharingio/.sharing.io/raw/main/cluster-api/manifests/helm-operator-crds.yaml \ + -f https://github.com/sharingio/.sharing.io/raw/main/cluster-api/manifests/helm-operator.yaml +#+end_src +* Install apps +** Humacs +*** Prepare +#+begin_src tmate :window community-infra +kubectl create namespace humacs --dry-run=client -o yaml | \ + kubectl apply -f - +#+end_src + +*** Configure +#+begin_src yaml :tangle ./humacs.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: humacs-home-ii + namespace: humacs +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi +--- +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: humacs + namespace: humacs +spec: + releaseName: humacs + chart: + git: https://github.com/humacs/humacs + ref: eaf562e067faa086d3165aba659fa52b727662d8 + path: chart/humacs + values: + initContainers: + - name: humacs-home-ii-fix-permissions + image: alpine:3.12 + command: + - sh + - -c + - chown 1000:1000 -R /home/ii && chown 1000 /run/containerd/containerd.sock + volumeMounts: + - mountPath: /home/ii + name: home-ii + - name: run-containerd-containerd-sock + mountPath: /run/containerd/containerd.sock + image: + repository: registry.gitlab.com/humacs/humacs/ii + tag: 2021.09.10.1346 + options: + hostDockerSocket: false + hostTmp: true + timezone: Pacific/Auckland + gitName: CloudNative.NZ + gitEmail: cloudnativenz-humacs-test@ii.coop + profile: "" + repos: + - https://github.com/ii/org + - https://gitlab.com/ii/nz + 
preinitScript: | + git clone "https://github.com/sharingio/.sharing.io" || \ + git clone https://github.com/sharingio/.sharing.io + . /home/ii/.sharing.io/sharingio-pair-preinit-script.sh + extraEnvVars: + - name: SHARINGIO_PAIR_NAME + value: "community-infra" + - name: SHARINGIO_PAIR_USER + value: "$SHARINGIO_PAIR_INSTANCE_SETUP_USER" + - name: SHARINGIO_PAIR_GUEST_NAMES + - name: SHARINGIO_PAIR_KUBERNETES_CONTROLPLANE_ENDPOINT + - name: SHARINGIO_PAIR_LOAD_BALANCER_IP + - name: HUMACS_DEBUG + value: "true" + - name: REINIT_HOME_FOLDER + value: "true" + - name: SHARINGIO_PAIR_BASE_DNS_NAME + value: "$SHARINGIO_PAIR_INSTANCE_SETUP_BASEDNSNAME" + - name: GITHUB_TOKEN + value: "$SHARINGIO_PAIR_INSTANCE_SETUP_GITHUBOAUTHTOKEN" + - name: CONTAINER_RUNTIME_ENDPOINT + value: unix:///run/containerd/containerd.sock + - name: CONTAINER_ADDRESS + value: /run/containerd/containerd.sock + - name: CONTAINERD_NAMESPACE + value: k8s.io + - name: K8S_NODE + valueFrom: + fieldRef: + fieldPath: spec.nodeName + extraVolumes: + - name: home-ii + hostPath: + path: /home/ii + - name: host + hostPath: + path: / + - name: run-containerd-containerd-sock + hostPath: + path: /run/containerd/containerd.sock + extraVolumeMounts: + - name: home-ii + mountPath: /home/ii + - name: host + mountPath: /var/run/host +#+end_src + +*** Install +#+begin_src tmate :window community-infra +kubectl -n humacs apply -f humacs.yaml +#+end_src + +* Tear down +#+begin_src tmate :window community-infra +for NODE in $(kubectl get nodes -o=jsonpath='{.items[*].metadata.name}'); do + NODE_INTERNAL_IP=$(kubectl get node "${NODE}" -o=jsonpath='{.status.addresses[?(@.type == "InternalIP")].address}') + for DEV in {b..f}; do + echo "${NODE} (${NODE_INTERNAL_IP}) /dev/sd${DEV}" + ssh -p 2222 root@${NODE_INTERNAL_IP} env DEV=$DEV bash << \EOF +set -x +hostname +yes | kubeadm reset +rm /etc/cni/net.d/* +rm -rf /var/lib/rook +for VG in $(lvs | grep osd | awk '{print $2}'); do + lvm vgremove $VG --force +done +lvs +wipefs 
--all /dev/sd${DEV} +lsblk -f /dev/sd${DEV} +EOF + done +done +#+end_src diff --git a/ii/community-infra/cilium.yaml b/ii/community-infra/cilium.yaml new file mode 100644 index 0000000..28a4837 --- /dev/null +++ b/ii/community-infra/cilium.yaml @@ -0,0 +1,768 @@ +--- +# Source: cilium/templates/cilium-agent-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium" + namespace: kube-system +--- +# Source: cilium/templates/cilium-operator-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "cilium-operator" + namespace: kube-system +--- +# Source: cilium/templates/cilium-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system +data: + + # Identity allocation mode selects how identities are shared between cilium + # nodes by setting how they are stored. The options are "crd" or "kvstore". + # - "crd" stores identities in kubernetes as CRDs (custom resource definition). + # These can be queried with: + # kubectl get ciliumid + # - "kvstore" stores identities in a kvstore, etcd or consul, that is + # configured below. Cilium versions before 1.6 supported only the kvstore + # backend. Upgrades from these older cilium versions should continue using + # the kvstore by commenting out the identity-allocation-mode below, or + # setting it to "kvstore". + identity-allocation-mode: crd + cilium-endpoint-gc-interval: "5m0s" + + # If you want to run cilium in debug mode change this value to true + debug: "false" + # The agent can be put into the following three policy enforcement modes + # default, always and never. + # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes + enable-policy: "default" + + # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 + # address. + enable-ipv4: "true" + + # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 + # address. 
+ enable-ipv6: "false" + # Users who wish to specify their own custom CNI configuration file must set + # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. + custom-cni-conf: "false" + enable-bpf-clock-probe: "true" + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: medium + + # The monitor aggregation interval governs the typical time between monitor + # notification events for each allowed connection. + # + # Only effective when monitor aggregation is set to "medium" or higher. + monitor-aggregation-interval: 5s + + # The monitor aggregation flags determine which TCP flags which, upon the + # first observation, cause monitor notifications to be generated. + # + # Only effective when monitor aggregation is set to "medium" or higher. + monitor-aggregation-flags: all + # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic + # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. + bpf-map-dynamic-size-ratio: "0.0025" + # bpf-policy-map-max specifies the maximum number of entries in endpoint + # policy map (per endpoint) + bpf-policy-map-max: "16384" + # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, + # backend and affinity maps. + bpf-lb-map-max: "65536" + # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass + # optimization for nodeport reverse NAT handling. + bpf-lb-external-clusterip: "false" + + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. 
Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # As a result, reply packets may be dropped and the load-balancing decisions + # for established connections may change. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "false" + + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "cilium/istio_proxy" + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: default + # Unique ID of the cluster. Must be unique across all connected clusters and + # in the range of 1 and 255. Only relevant when building a mesh of clusters. + cluster-id: "" + + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve + tunnel: vxlan + # Enables L7 proxy for L7 policy enforcement and visibility + enable-l7-proxy: "true" + + enable-ipv4-masquerade: "true" + enable-ipv6-masquerade: "true" + enable-bpf-masquerade: "true" + + enable-xt-socket-fallback: "true" + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + + auto-direct-node-routes: "false" + enable-bandwidth-manager: "false" + enable-local-redirect-policy: "false" + + kube-proxy-replacement: "disabled" + enable-health-check-nodeport: "true" + node-port-bind-protection: "true" + enable-auto-protect-node-port-range: "true" + enable-session-affinity: "true" + enable-endpoint-health-checking: "true" + enable-health-checking: "true" + enable-well-known-identities: "false" + enable-remote-node-identity: "true" + operator-api-serve-addr: "127.0.0.1:9234" + # Enable Hubble gRPC service. 
+ enable-hubble: "true" + # UNIX domain socket for Hubble server to listen to. + hubble-socket-path: "/var/run/cilium/hubble.sock" + # An additional address for Hubble server to listen to (e.g. ":4244"). + hubble-listen-address: ":4244" + hubble-disable-tls: "false" + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt + ipam: "cluster-pool" + cluster-pool-ipv4-cidr: "10.0.0.0/8" + cluster-pool-ipv4-mask-size: "24" + disable-cnp-status-updates: "true" + cgroup-root: "/run/cilium/cgroupv2" +--- +# Source: cilium/templates/cilium-agent-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - nodes + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - pods/finalizers + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + # Deprecated for removal in v1.10 + - create + - list + - watch + - update + + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. 
+ - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + - ciliumegressnatpolicies + verbs: + - '*' +--- +# Source: cilium/templates/cilium-operator-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform LB IP allocation for BGP + - services/status + verbs: + - update +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains `ToGroup` to its endpoints + - services + - endpoints + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - 
ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between multiple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +--- +# Source: cilium/templates/cilium-agent-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: "cilium" + namespace: kube-system +--- +# Source: cilium/templates/cilium-operator-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: "cilium-operator" + namespace: kube-system +--- +# Source: cilium/templates/cilium-agent-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + updateStrategy: + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate + template: + metadata: + annotations: + # This annotation plus the CriticalAddonsOnly toleration makes + # cilium to be a critical pod in the cluster, which ensures cilium + # gets priority scheduling. 
+ # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + k8s-app: cilium + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - cilium + topologyKey: kubernetes.io/hostname + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9876 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 10 + # The initial delay for the liveness probe is intentionally large to + # avoid an endless kill & restart cycle if in the event that the initial + # bootstrapping takes longer than expected. + # Starting from Kubernetes 1.20, we are using startupProbe instead + # of this field. 
+ initialDelaySeconds: 120 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9876 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + key: cni-chaining-mode + name: cilium-config + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + key: custom-cni-conf + name: cilium-config + optional: true + image: "quay.io/cilium/cilium:v1.10.4@sha256:7d354052ccf2a7445101d78cebd14444c7c40129ce7889f2f04b89374dbf8a1d" + imagePullPolicy: IfNotPresent + lifecycle: + postStart: + exec: + command: + - "/cni-install.sh" + - "--enable-debug=false" + - "--cni-exclusive=true" + preStop: + exec: + command: + - /cni-uninstall.sh + name: cilium-agent + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/opt/cni/bin + name: cni-path + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + # Needed to be able to load kernel modules + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/lib/cilium/tls/hubble + name: hubble-tls + readOnly: true + hostNetwork: true + initContainers: + # Required to mount 
cgroup2 filesystem on the underlying Kubernetes node. + # We use nsenter command with host's cgroup and mount namespaces enabled. + - name: mount-cgroup + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + command: + - sh + - -c + # The statically linked Go program binary is invoked to avoid any + # dependency on utilities like sh and mount that can be missing on certain + # distros installed on the underlying host. Copy the binary to the + # same directory where we install cilium cni plugin so that exec permissions + # are available. + - 'cp /usr/bin/cilium-mount /hostbin/cilium-mount && nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; rm /hostbin/cilium-mount' + image: "quay.io/cilium/cilium:v1.10.4@sha256:7d354052ccf2a7445101d78cebd14444c7c40129ce7889f2f04b89374dbf8a1d" + imagePullPolicy: IfNotPresent + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + securityContext: + privileged: true + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: "quay.io/cilium/cilium:v1.10.4@sha256:7d354052ccf2a7445101d78cebd14444c7c40129ce7889f2f04b89374dbf8a1d" + imagePullPolicy: IfNotPresent + name: clean-cilium-state + securityContext: + capabilities: + add: + - NET_ADMIN + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + # Required to mount cgroup filesystem from the host to cilium agent pod + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup + mountPropagation: HostToContainer + - mountPath: /var/run/cilium + name: cilium-run + resources: + requests: + cpu: 100m + memory: 100Mi + restartPolicy: Always + priorityClassName: system-node-critical 
+ serviceAccount: "cilium" + serviceAccountName: "cilium" + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + # To keep state between restarts / upgrades + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + # To keep state between restarts / upgrades for bpf maps + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + # To mount cgroup2 filesystem on the host + - hostPath: + path: /proc + type: Directory + name: hostproc + # To keep state between restarts / upgrades for cgroup2 filesystem + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + # To install cilium cni plugin in the host + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + # To install cilium cni configuration in the host + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + # To be able to load kernel modules + - hostPath: + path: /lib/modules + name: lib-modules + # To access iptables concurrently with other processes (e.g. 
kube-proxy) + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + # To read the clustermesh configuration + - name: clustermesh-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-clustermesh + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path + - name: hubble-tls + projected: + sources: + - secret: + name: hubble-server-certs + items: + - key: ca.crt + path: client-ca.crt + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key + optional: true +--- +# Source: cilium/templates/cilium-operator-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + io.cilium/app: operator + name: cilium-operator + name: cilium-operator + namespace: kube-system +spec: + # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go + # for more details. + replicas: 2 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + labels: + io.cilium/app: operator + name: cilium-operator + spec: + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. 
+ affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: io.cilium/app + operator: In + values: + - operator + topologyKey: kubernetes.io/hostname + containers: + - args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + command: + - cilium-operator-generic + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + image: "quay.io/cilium/operator-generic:v1.10.4@sha256:c49a14e34634ff1a494c84b718641f27267fb3a0291ce3d74352b44f8a8d2f93" + imagePullPolicy: IfNotPresent + name: cilium-operator + livenessProbe: + httpGet: + host: '127.0.0.1' + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccount: "cilium-operator" + serviceAccountName: "cilium-operator" + tolerations: + - operator: Exists + volumes: + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path diff --git a/ii/community-infra/humacs.yaml b/ii/community-infra/humacs.yaml new file mode 100644 index 0000000..6c5ae85 --- /dev/null +++ b/ii/community-infra/humacs.yaml @@ -0,0 +1,96 @@ +# Configure + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: humacs-home-ii + namespace: humacs +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi +--- +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: humacs + namespace: humacs +spec: + releaseName: humacs + chart: + git: https://github.com/humacs/humacs + ref: 
eaf562e067faa086d3165aba659fa52b727662d8 + path: chart/humacs + values: + initContainers: + - name: humacs-home-ii-fix-permissions + image: alpine:3.12 + command: + - sh + - -c + - chown 1000:1000 -R /home/ii && chown 1000 /run/containerd/containerd.sock + volumeMounts: + - mountPath: /home/ii + name: home-ii + - name: run-containerd-containerd-sock + mountPath: /run/containerd/containerd.sock + image: + repository: registry.gitlab.com/humacs/humacs/ii + tag: 2021.09.10.1346 + options: + hostDockerSocket: false + hostTmp: true + timezone: Pacific/Auckland + gitName: CloudNative.NZ + gitEmail: cloudnativenz-humacs-test@ii.coop + profile: "" + repos: + - https://github.com/ii/org + - https://gitlab.com/ii/nz + preinitScript: | + git clone "https://github.com/sharingio/.sharing.io" || \ + git clone https://github.com/sharingio/.sharing.io + . /home/ii/.sharing.io/sharingio-pair-preinit-script.sh + extraEnvVars: + - name: SHARINGIO_PAIR_NAME + value: "community-infra" + - name: SHARINGIO_PAIR_USER + value: "$SHARINGIO_PAIR_INSTANCE_SETUP_USER" + - name: SHARINGIO_PAIR_GUEST_NAMES + - name: SHARINGIO_PAIR_KUBERNETES_CONTROLPLANE_ENDPOINT + - name: SHARINGIO_PAIR_LOAD_BALANCER_IP + - name: HUMACS_DEBUG + value: "true" + - name: REINIT_HOME_FOLDER + value: "true" + - name: SHARINGIO_PAIR_BASE_DNS_NAME + value: "$SHARINGIO_PAIR_INSTANCE_SETUP_BASEDNSNAME" + - name: GITHUB_TOKEN + value: "$SHARINGIO_PAIR_INSTANCE_SETUP_GITHUBOAUTHTOKEN" + - name: CONTAINER_RUNTIME_ENDPOINT + value: unix:///run/containerd/containerd.sock + - name: CONTAINER_ADDRESS + value: /run/containerd/containerd.sock + - name: CONTAINERD_NAMESPACE + value: k8s.io + - name: K8S_NODE + valueFrom: + fieldRef: + fieldPath: spec.nodeName + extraVolumes: + - name: home-ii + hostPath: + path: /home/ii + - name: host + hostPath: + path: / + - name: run-containerd-containerd-sock + hostPath: + path: /run/containerd/containerd.sock + extraVolumeMounts: + - name: home-ii + mountPath: /home/ii + - name: host + 
mountPath: /var/run/host diff --git a/ii/community-infra/preKubeadmCommands.sh b/ii/community-infra/preKubeadmCommands.sh new file mode 100644 index 0000000..15bba67 --- /dev/null +++ b/ii/community-infra/preKubeadmCommands.sh @@ -0,0 +1,67 @@ +# Prepare +# The set of commands to prepare the Ubuntu installs for Kubernetes + +#!/bin/bash +KUBERNETES_VERSION='1.21.2' + +PACKAGES=( + apt-transport-https + ca-certificates + cloud-utils + containerd + dnsutils + ebtables + gettext-base + git + jq + kitty-terminfo + prips + socat +) + +pwd +cd $(dirname $0) + +# ensure mounts +sed -ri '/\\sswap\\s/s/^#?/#/' /etc/fstab +swapoff -a +mount -a + +# install required packages +apt-get -y update +DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl software-properties-common +curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - +echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list +apt-get update -y +TRIMMED_KUBERNETES_VERSION=$(echo $KUBERNETES_VERSION | sed 's/\./\\./g' | sed 's/^v//') +RESOLVED_KUBERNETES_VERSION=$(apt-cache policy kubelet | awk -v VERSION=${TRIMMED_KUBERNETES_VERSION} '$1~ VERSION { print $1 }' | head -n1) +apt-get install -y ${PACKAGES[*]} \ + kubelet=${RESOLVED_KUBERNETES_VERSION} \ + kubeadm=${RESOLVED_KUBERNETES_VERSION} \ + kubectl=${RESOLVED_KUBERNETES_VERSION} +systemctl daemon-reload + +# configure container runtime +cat < NetworkAttachmentDefinition object name in Multus + #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 + #ipFamily: "IPv6" + # enable the crash collector for ceph daemon crash collection + crashCollector: + disable: false + # enable log collector, daemons will log on files and rotate + # logCollector: + # enabled: true + # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. 
+ # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. + cleanupPolicy: + # Since cluster cleanup is destructive to data, confirmation is required. + # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". + # This value should only be set when the cluster is about to be deleted. After the confirmation is set, + # Rook will immediately stop configuring the cluster and only wait for the delete command. + # If the empty string is set, Rook will not destroy any data on hosts during uninstall. + confirmation: "" + # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion + sanitizeDisks: + # method indicates if the entire disk should be sanitized or simply ceph's metadata + # in both cases, re-install is possible + # possible choices are 'complete' or 'quick' (default) + method: quick + # dataSource indicates where to get random bytes from to write on the disk + # possible choices are 'zero' (default) or 'random' + # using random sources will consume entropy from the system and will take much more time than the zero source + dataSource: zero + # iteration overwrites N times instead of the default (1) + # takes an integer value + iteration: 1 + # allowUninstallWithVolumes defines how the uninstall should be performed + # If set to true, cephCluster deletion does not wait for the PVs to be deleted. + allowUninstallWithVolumes: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. 
+# placement: +# all: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: role +# operator: In +# values: +# - storage-node +# podAffinity: +# podAntiAffinity: +# topologySpreadConstraints: +# tolerations: +# - key: storage-node +# operator: Exists +# The above placement information can also be specified for mon, osd, and mgr components +# mon: +# Monitor deployments may contain an anti-affinity rule for avoiding monitor +# collocation on the same node. This is a required rule when host network is used +# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a +# preferred rule with weight: 50. +# osd: +# mgr: +# cleanup: + annotations: +# all: +# mon: +# osd: +# cleanup: +# prepareosd: +# If no mgr annotations are set, prometheus scrape annotations will be set by default. +# mgr: + labels: +# all: +# mon: +# osd: +# cleanup: +# mgr: +# prepareosd: + resources: +# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory +# mgr: +# limits: +# cpu: "500m" +# memory: "1024Mi" +# requests: +# cpu: "500m" +# memory: "1024Mi" +# The above example requests/limits can also be added to the mon and osd components +# mon: +# osd: +# prepareosd: +# crashcollector: +# logcollector: +# cleanup: + # The option to automatically remove OSDs that are out and are safe to destroy. + removeOSDsIfOutAndSafeToRemove: false +# priorityClassNames: +# all: rook-ceph-default-priority-class +# mon: rook-ceph-mon-priority-class +# osd: rook-ceph-osd-priority-class +# mgr: rook-ceph-mgr-priority-class + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: false + deviceFilter: "^sd[bdef]" + # config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. 
+ # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" +# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named +# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. +# nodes: +# - name: "172.17.4.201" +# devices: # specific devices to use for storage can be specified for each node +# - name: "sdb" +# - name: "nvme01" # multiple osds can be created on high performance devices +# config: +# osdsPerDevice: "5" +# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths +# config: # configuration can be specified at the node level which overrides the cluster level config +# storeType: filestore +# - name: "172.17.4.301" +# deviceFilter: "^sd." + # The section for configuring management of daemon disruptions during upgrade or fencing. + disruptionManagement: + # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # block eviction of OSDs by default and unblock them safely when drains are detected. + managePodBudgets: false + # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the + # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. 
+ osdMaintenanceTimeout: 30 + # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. + # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. + # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. + pgHealthCheckTimeout: 0 + # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. + # Only available on OpenShift. + manageMachineDisruptionBudgets: false + # Namespace in which to watch for the MachineDisruptionBudgets. + machineDisruptionBudgetNamespace: openshift-machine-api + + # healthChecks + # Valid values for daemons are 'mon', 'osd', 'status' + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe, it works for all mon,mgr,osd daemons + livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false diff --git a/ii/community-infra/rook-ceph-common.yaml b/ii/community-infra/rook-ceph-common.yaml new file mode 100644 index 0000000..aed387d --- /dev/null +++ b/ii/community-infra/rook-ceph-common.yaml @@ -0,0 +1,1256 @@ +################################################################################################################### +# Create the common resources that are necessary to start the operator and the ceph cluster. +# These resources *must* be created before the operator.yaml and cluster.yaml or their variants. +# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace. +# +# If the operator needs to manage multiple clusters (in different namespaces), see the section below +# for "cluster-specific resources". 
The resources below that section will need to be created for each namespace +# where the operator needs to manage the cluster. The resources above that section do not be created again. +# +# Most of the sections are prefixed with a 'OLM' keyword which is used to build our CSV for an OLM (Operator Life Cycle manager) +################################################################################################################### + +# Namespace where the operator and other rook resources are created +apiVersion: v1 +kind: Namespace +metadata: + name: rook-ceph # namespace:cluster +# OLM: BEGIN OBJECTBUCKET ROLEBINDING +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-object-bucket +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-object-bucket +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +# OLM: END OBJECTBUCKET ROLEBINDING +# OLM: BEGIN OPERATOR ROLE +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-admission-controller + namespace: rook-ceph # namespace:operator +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-admission-controller-role +rules: + - apiGroups: ["ceph.rook.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-admission-controller-rolebinding +subjects: + - kind: ServiceAccount + name: rook-ceph-admission-controller + apiGroup: "" + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: rook-ceph-admission-controller-role + apiGroup: rbac.authorization.k8s.io +--- +# The cluster role for managing all the cluster-specific resources in a namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rook-ceph-cluster-mgmt + labels: + operator: rook + storage-backend: ceph +rules: + - 
apiGroups: + - "" + - apps + - extensions + resources: + - secrets + - pods + - pods/log + - services + - configmaps + - deployments + - daemonsets + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +rules: + # Most resources are represented by a string representation of their name, such as “pods”, just as it appears in the URL for the relevant API endpoint. + # However, some Kubernetes APIs involve a “subresource”, such as the logs for a pod. [...] + # To represent this in an RBAC role, use a slash to delimit the resource and subresource. + # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources + - apiGroups: [""] + resources: ["pods", "pods/log"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["pods/exec"] + verbs: ["create"] +--- +# The role for the operator to manage resources in its own namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph +rules: + - apiGroups: + - "" + resources: + - pods + - configmaps + - services + verbs: + - get + - list + - watch + - patch + - create + - update + - delete + - apiGroups: + - apps + - extensions + resources: + - daemonsets + - statefulsets + - deployments + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - batch + resources: + - cronjobs + verbs: + - delete +--- +# The cluster role for managing the Rook CRDs +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph +rules: + - apiGroups: + - "" + resources: + # Pod access is needed for fencing + - pods + # Node access is needed for determining nodes where mons should run + - nodes + - 
nodes/proxy + - services + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + # PVs and PVCs are managed by the Rook provisioner + - persistentvolumes + - persistentvolumeclaims + - endpoints + verbs: + - get + - list + - watch + - patch + - create + - update + - delete + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - ceph.rook.io + resources: + - "*" + verbs: + - "*" + - apiGroups: + - rook.io + resources: + - "*" + verbs: + - "*" + - apiGroups: + - policy + - apps + - extensions + resources: + # This is for the clusterdisruption controller + - poddisruptionbudgets + # This is for both clusterdisruption and nodedrain controllers + - deployments + - replicasets + verbs: + - "*" + - apiGroups: + - healthchecking.openshift.io + resources: + - machinedisruptionbudgets + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - machine.openshift.io + resources: + - machines + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - get + - update + - apiGroups: + - k8s.cni.cncf.io + resources: + - network-attachment-definitions + verbs: + - get +--- +# Aspects of ceph-mgr that require cluster-wide access +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-cluster + labels: + operator: rook + storage-backend: ceph +rules: + - apiGroups: + - "" + resources: + - configmaps + - nodes + - nodes/proxy + - persistentvolumes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - list + - get + - watch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +--- +kind: 
ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-object-bucket + labels: + operator: rook + storage-backend: ceph +rules: + - apiGroups: + - "" + verbs: + - "*" + resources: + - secrets + - configmaps + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - "objectbucket.io" + verbs: + - "*" + resources: + - "*" +# OLM: END OPERATOR ROLE +# OLM: BEGIN SERVICE ACCOUNT SYSTEM +--- +# The rook system service account used by the operator, agent, and discovery pods +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph +# imagePullSecrets: +# - name: my-registry-secret + +# OLM: END SERVICE ACCOUNT SYSTEM +# OLM: BEGIN OPERATOR ROLEBINDING +--- +# Grant the operator, agent, and discovery agents access to resources in the namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-system +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-system +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-global +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +# OLM: END OPERATOR ROLEBINDING +################################################################################################################# +# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph" +# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles +# and bindings accordingly. +################################################################################################################# +# Service account for the Ceph OSDs. Must exist and cannot be renamed. +# OLM: BEGIN SERVICE ACCOUNT OSD +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +# imagePullSecrets: +# - name: my-registry-secret + +# OLM: END SERVICE ACCOUNT OSD +# OLM: BEGIN SERVICE ACCOUNT MGR +--- +# Service account for the Ceph Mgr. Must exist and cannot be renamed. 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +# imagePullSecrets: +# - name: my-registry-secret + +# OLM: END SERVICE ACCOUNT MGR +# OLM: BEGIN CMD REPORTER SERVICE ACCOUNT +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +# OLM: END CMD REPORTER SERVICE ACCOUNT +# OLM: BEGIN CLUSTER ROLE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: ["ceph.rook.io"] + resources: ["cephclusters", "cephclusters/finalizers"] + verbs: ["get", "list", "create", "update", "delete"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list +--- +# Aspects of ceph-mgr that require access to the system namespace +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-system +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +# Aspects of ceph-mgr that operate within the cluster's namespace +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +rules: + - apiGroups: + - "" + resources: + - pods + - services + - pods/log + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - ceph.rook.io + resources: + - "*" + verbs: + - "*" + - apiGroups: + - apps + resources: + - deployments/scale + - deployments + verbs: + - patch + - delete + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - 
delete +# OLM: END CLUSTER ROLE +# OLM: BEGIN CMD REPORTER ROLE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +rules: + - apiGroups: + - "" + resources: + - pods + - configmaps + verbs: + - get + - list + - watch + - create + - update + - delete +# OLM: END CMD REPORTER ROLE +# OLM: BEGIN CLUSTER ROLEBINDING +--- +# Allow the operator to create resources in this cluster's namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cluster-mgmt + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-cluster-mgmt +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Allow the osd pods in this namespace to work with configmaps +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-osd +subjects: + - kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +--- +# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-mgr +subjects: + - kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +# Allow the ceph mgr to access the rook system resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-system + namespace: rook-ceph # namespace:operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-system +subjects: + - kind: 
ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-cluster +subjects: + - kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster + +--- +# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-osd +subjects: + - kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster + +# OLM: END CLUSTER ROLEBINDING +# OLM: BEGIN CMD REPORTER ROLEBINDING +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-cmd-reporter +subjects: + - kind: ServiceAccount + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +# OLM: END CMD REPORTER ROLEBINDING +################################################################################################################# +# Beginning of pod security policy resources. The example will assume the cluster will be created in the +# "rook-ceph" namespace. If you want to create the cluster in a different namespace, you will need to modify +# the roles and bindings accordingly. 
+################################################################################################################# +# OLM: BEGIN CLUSTER POD SECURITY POLICY +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + # Note: Kubernetes matches PSPs to deployments alphabetically. In some environments, this PSP may + # need to be renamed with a value that will match before others. + name: 00-rook-privileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: "runtime/default" + seccomp.security.alpha.kubernetes.io/defaultProfileName: "runtime/default" +spec: + privileged: true + allowedCapabilities: + # required by CSI + - SYS_ADMIN + # fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group + fsGroup: + rule: RunAsAny + # runAsUser, supplementalGroups - Rook needs to run some pods as root + # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + # seLinux - seLinux context is unknown ahead of time; set if this is well-known + seLinux: + rule: RunAsAny + volumes: + # recommended minimum set + - configMap + - downwardAPI + - emptyDir + - persistentVolumeClaim + - secret + - projected + # required for Rook + - hostPath + - flexVolume + # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known + # allowedHostPaths: + # - pathPrefix: "/run/udev" # for OSD prep + # readOnly: false + # - pathPrefix: "/dev" # for OSD prep + # readOnly: false + # - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to + # readOnly: false + # Ceph requires host IPC for setting up encrypted devices + hostIPC: true + # Ceph OSDs need to share the same PID namespace + hostPID: true + # hostNetwork can be set to 'false' if host networking isn't used + hostNetwork: true + hostPorts: + # Ceph messenger protocol v1 + - min: 6789 + max: 6790 # <- support old default 
port + # Ceph messenger protocol v2 + - min: 3300 + max: 3300 + # Ceph RADOS ports for OSDs, MDSes + - min: 6800 + max: 7300 + # # Ceph dashboard port HTTP (not recommended) + # - min: 7000 + # max: 7000 + # Ceph dashboard port HTTPS + - min: 8443 + max: 8443 + # Ceph mgr Prometheus Metrics + - min: 9283 + max: 9283 +# OLM: END CLUSTER POD SECURITY POLICY +# OLM: BEGIN POD SECURITY POLICY BINDINGS +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "psp:rook" +rules: + - apiGroups: + - policy + resources: + - podsecuritypolicies + resourceNames: + - 00-rook-privileged + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-ceph-system-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "psp:rook" +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rook-ceph-default-psp + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:rook +subjects: + - kind: ServiceAccount + name: default + namespace: rook-ceph # namespace:cluster +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rook-ceph-osd-psp + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:rook +subjects: + - kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rook-ceph-mgr-psp + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:rook +subjects: + - kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding 
+metadata: + name: rook-ceph-cmd-reporter-psp + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:rook +subjects: + - kind: ServiceAccount + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +# OLM: END CLUSTER POD SECURITY POLICY BINDINGS +# OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +# OLM: END CSI CEPHFS SERVICE ACCOUNT +# OLM: BEGIN CSI CEPHFS ROLE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-external-provisioner-cfg + namespace: rook-ceph # namespace:operator +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "create", "delete"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +# OLM: END CSI CEPHFS ROLE +# OLM: BEGIN CSI CEPHFS ROLEBINDING +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-provisioner-role-cfg + namespace: rook-ceph # namespace:operator +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: Role + name: cephfs-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io +# OLM: END CSI CEPHFS ROLEBINDING +# OLM: BEGIN CSI CEPHFS CLUSTER ROLE +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - 
apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-external-provisioner-runner +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: 
["persistentvolumeclaims/status"] + verbs: ["update", "patch"] +# OLM: END CSI CEPHFS CLUSTER ROLE +# OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-cephfs-plugin-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "psp:rook" +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-cephfs-provisioner-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "psp:rook" +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: cephfs-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: cephfs-external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +# OLM: END CSI CEPHFS CLUSTER ROLEBINDING +# OLM: BEGIN CSI RBD SERVICE ACCOUNT +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +# OLM: END CSI RBD SERVICE ACCOUNT +# OLM: BEGIN CSI RBD ROLE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-external-provisioner-cfg + 
namespace: rook-ceph # namespace:operator +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +# OLM: END CSI RBD ROLE +# OLM: BEGIN CSI RBD ROLEBINDING +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role-cfg + namespace: rook-ceph # namespace:operator +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: Role + name: rbd-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io +# OLM: END CSI RBD ROLEBINDING +# OLM: BEGIN CSI RBD CLUSTER ROLE +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-nodeplugin +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-external-provisioner-runner +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", 
"list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: ["replication.storage.openshift.io"] + resources: ["volumereplications", "volumereplicationclasses"] + verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] + - apiGroups: ["replication.storage.openshift.io"] + resources: ["volumereplications/finalizers"] + verbs: ["update"] + - apiGroups: ["replication.storage.openshift.io"] + resources: ["volumereplications/status"] + verbs: ["get", "patch", "update"] + - apiGroups: ["replication.storage.openshift.io"] + resources: ["volumereplicationclasses/status"] + verbs: ["get"] + - apiGroups: 
[""] + resources: ["serviceaccounts"] + verbs: ["get"] +# OLM: END CSI RBD CLUSTER ROLE +# OLM: BEGIN CSI RBD CLUSTER ROLEBINDING +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-rbd-plugin-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "psp:rook" +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-rbd-provisioner-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "psp:rook" +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-nodeplugin +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: rbd-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: rbd-external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +# OLM: END CSI RBD CLUSTER ROLEBINDING +--- +# Aspects of ceph osd purge job that require access to the operator/cluster namespace +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:operator +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "delete"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["get", "list", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + 
verbs: ["delete"] +--- +# Allow the osd purge job to run in this namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-purge-osd +subjects: + - kind: ServiceAccount + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:operator +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:operator diff --git a/ii/community-infra/rook-ceph-crds.yaml b/ii/community-infra/rook-ceph-crds.yaml new file mode 100644 index 0000000..d653fdf --- /dev/null +++ b/ii/community-infra/rook-ceph-crds.yaml @@ -0,0 +1,9154 @@ +############################################################################## +# Create the CRDs that are necessary before creating your Rook cluster. +# These resources *must* be created before the cluster.yaml or their variants. +############################################################################## +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephblockpools.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBlockPool + listKind: CephBlockPoolList + plural: cephblockpools + singular: cephblockpool + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephBlockPool represents a Ceph Storage Pool + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PoolSpec represents the spec of ceph pool + properties: + compressionMode: + default: none + description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether 
this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + 
description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + mirroringInfo: + description: MirroringInfoSpec is the status of the pool mirroring + properties: + details: + type: string + lastChanged: + type: string + lastChecked: + type: string + mode: + description: Mode is the mirroring mode + type: string + peers: + description: Peers are the list of peer sites connected to that cluster + items: + description: PeersSpec contains 
peer details + properties: + client_name: + description: ClientName is the CephX user used to connect to the peer + type: string + direction: + description: Direction is the peer mirroring direction + type: string + mirror_uuid: + description: MirrorUUID is the mirror UUID + type: string + site_name: + description: SiteName is the current site name + type: string + uuid: + description: UUID is the peer UUID + type: string + type: object + type: array + site_name: + description: SiteName is the current site name + type: string + type: object + mirroringStatus: + description: MirroringStatusSpec is the status of the pool mirroring + properties: + details: + description: Details contains potential status errors + type: string + lastChanged: + description: LastChanged is the last time time the status last changed + type: string + lastChecked: + description: LastChecked is the last time time the status was checked + type: string + summary: + description: Summary is the mirroring status summary + properties: + daemon_health: + description: DaemonHealth is the health of the mirroring daemon + type: string + health: + description: Health is the mirroring health + type: string + image_health: + description: ImageHealth is the health of the mirrored image + type: string + states: + description: States is the various state for all mirrored images + nullable: true + properties: + error: + description: Error is when the mirroring state is errored + type: integer + replaying: + description: Replaying is when the replay of the mirroring journal is on-going + type: integer + starting_replay: + description: StartingReplay is when the replay of the mirroring journal starts + type: integer + stopped: + description: Stopped is when the mirroring state is stopped + type: integer + stopping_replay: + description: StopReplaying is when the replay of the mirroring journal stops + type: integer + syncing: + description: Syncing is when the image is syncing + type: integer + unknown: + 
description: Unknown is when the mirroring state is unknown + type: integer + type: object + type: object + type: object + phase: + description: ConditionType represent a resource's status + type: string + snapshotScheduleStatus: + description: SnapshotScheduleStatusSpec is the status of the snapshot schedule + properties: + details: + description: Details contains potential status errors + type: string + lastChanged: + description: LastChanged is the last time time the status last changed + type: string + lastChecked: + description: LastChecked is the last time time the status was checked + type: string + snapshotSchedules: + description: SnapshotSchedules is the list of snapshots scheduled + items: + description: SnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool + properties: + image: + description: Image is the mirrored image + type: string + items: + description: Items is the list schedules times for a given snapshot + items: + description: SnapshotSchedule is a schedule + properties: + interval: + description: Interval is the interval in which snapshots will be taken + type: string + start_time: + description: StartTime is the snapshot starting time + type: string + type: object + type: array + namespace: + description: Namespace is the RADOS namespace the image is part of + type: string + pool: + description: Pool is the pool name + type: string + type: object + nullable: true + type: array + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephclients.ceph.rook.io +spec: + group: ceph.rook.io + names: + 
kind: CephClient + listKind: CephClientList + plural: cephclients + singular: cephclient + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephClient represents a Ceph Client + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec represents the specification of a Ceph Client + properties: + caps: + additionalProperties: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + name: + type: string + required: + - caps + type: object + status: + description: Status represents the status of a Ceph Client + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + phase: + description: ConditionType represent a resource's status + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephclusters.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephCluster + listKind: 
CephClusterList + plural: cephclusters + singular: cephcluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Directory used on the K8s nodes + jsonPath: .spec.dataDirHostPath + name: DataDirHostPath + type: string + - description: Number of MONs + jsonPath: .spec.mon.count + name: MonCount + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Phase + jsonPath: .status.phase + name: Phase + type: string + - description: Message + jsonPath: .status.message + name: Message + type: string + - description: Ceph Health + jsonPath: .status.ceph.health + name: Health + type: string + - jsonPath: .spec.external.enable + name: External + type: boolean + name: v1 + schema: + openAPIV3Schema: + description: CephCluster is a Ceph storage cluster + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterSpec represents the specification of Ceph Cluster + properties: + annotations: + additionalProperties: + additionalProperties: + type: string + description: Annotations are annotations + type: object + description: The annotations-related configuration to add/set on each Pod related object. 
+ nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + cephVersion: + description: The version information that instructs Rook to orchestrate a particular version of Ceph. + nullable: true + properties: + allowUnsupported: + description: Whether to allow unsupported versions (do not set to true in production) + type: boolean + image: + description: Image is the container image used to launch the ceph daemons, such as quay.io/ceph/ceph: The full list of images can be found at https://quay.io/repository/ceph/ceph?tab=tags + type: string + type: object + cleanupPolicy: + description: Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster deletion is not imminent. + nullable: true + properties: + allowUninstallWithVolumes: + description: AllowUninstallWithVolumes defines whether we can proceed with the uninstall if they are RBD images still present + type: boolean + confirmation: + description: Confirmation represents the cleanup confirmation + nullable: true + pattern: ^$|^yes-really-destroy-data$ + type: string + sanitizeDisks: + description: SanitizeDisks represents way we sanitize disks + nullable: true + properties: + dataSource: + description: DataSource is the data source to use to sanitize the disk with + enum: + - zero + - random + type: string + iteration: + description: Iteration is the number of pass to apply the sanitizing + format: int32 + type: integer + method: + description: Method is the method we use to sanitize disks + enum: + - complete + - quick + type: string + type: object + type: object + continueUpgradeAfterChecksEvenIfNotHealthy: + description: ContinueUpgradeAfterChecksEvenIfNotHealthy defines if an upgrade should continue even if PGs are not clean + type: boolean + crashCollector: + description: A spec for the crash controller + nullable: true + properties: + daysToRetain: + description: DaysToRetain represents the number of days to retain crash until they get pruned + 
type: integer + disable: + description: Disable determines whether we should enable the crash collector + type: boolean + type: object + dashboard: + description: Dashboard settings + nullable: true + properties: + enabled: + description: Enabled determines whether to enable the dashboard + type: boolean + port: + description: Port is the dashboard webserver port + maximum: 65535 + minimum: 0 + type: integer + ssl: + description: SSL determines whether SSL should be used + type: boolean + urlPrefix: + description: URLPrefix is a prefix for all URLs to use the dashboard with a reverse proxy + type: string + type: object + dataDirHostPath: + description: The path on the host where config and data can be persisted + pattern: ^/(\S+) + type: string + disruptionManagement: + description: A spec for configuring disruption management. + nullable: true + properties: + machineDisruptionBudgetNamespace: + description: Namespace to look for MDBs by the machineDisruptionBudgetController + type: string + manageMachineDisruptionBudgets: + description: This enables management of machinedisruptionbudgets + type: boolean + managePodBudgets: + description: This enables management of poddisruptionbudgets + type: boolean + osdMaintenanceTimeout: + description: OSDMaintenanceTimeout sets how many additional minutes the DOWN/OUT interval is for drained failure domains it only works if managePodBudgets is true. the default is 30 minutes + format: int64 + type: integer + pgHealthCheckTimeout: + description: PGHealthCheckTimeout is the time (in minutes) that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. Rook will continue with the next drain if the timeout exceeds. It only works if managePodBudgets is true. No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. 
+ format: int64 + type: integer + type: object + external: + description: Whether the Ceph Cluster is running external to this Kubernetes cluster mon, mgr, osd, mds, and discover daemons will not be created for external clusters. + nullable: true + properties: + enable: + description: Enable determines whether external mode is enabled or not + type: boolean + type: object + x-kubernetes-preserve-unknown-fields: true + healthCheck: + description: Internal daemon healthchecks and liveness probe + nullable: true + properties: + daemonHealth: + description: DaemonHealth is the health check for a given daemon + nullable: true + properties: + mon: + description: Monitor represents the health check settings for the Ceph monitor + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + osd: + description: ObjectStorageDaemon represents the health check settings for the Ceph OSDs + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + status: + description: Status represents the health check settings for the Ceph health + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + livenessProbe: + additionalProperties: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: Probe describes a health check to be performed against a container to 
determine whether it is alive or ready to receive traffic. + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + type: object + description: LivenessProbe allows to change the livenessprobe configuration for a given daemon + type: object + type: object + labels: + additionalProperties: + additionalProperties: + type: string + description: Labels are label for a given daemons + type: object + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + logCollector: + description: Logging represents loggings settings + nullable: true + properties: + enabled: + description: Enabled represents whether the log collector is enabled + type: boolean + periodicity: + description: Periodicity is the periodicity of the log rotation + type: string + type: object + mgr: + description: A spec for mgr related options + nullable: true + properties: + allowMultiplePerNode: + description: AllowMultiplePerNode allows to run multiple managers on the same node (not recommended) + type: boolean + count: + description: Count is the number of manager to run + maximum: 2 + minimum: 0 + type: integer + modules: + description: Modules is the list of ceph manager modules to enable/disable + items: + description: Module represents mgr modules that the user wants to enable or disable + properties: + enabled: + description: Enabled determines whether a module should be enabled or not + type: boolean + name: + description: Name is the name of the ceph manager module + type: string + type: object + nullable: true + type: array + type: object + mon: + description: A spec for mon related options + nullable: true + properties: + allowMultiplePerNode: + description: AllowMultiplePerNode determines if we can run 
multiple monitors on the same node (not recommended) + type: boolean + count: + description: Count is the number of Ceph monitors + minimum: 0 + type: integer + stretchCluster: + description: StretchCluster is the stretch cluster specification + properties: + failureDomainLabel: + description: 'FailureDomainLabel the failure domain name (e,g: zone)' + type: string + subFailureDomain: + description: SubFailureDomain is the failure domain within a zone + type: string + zones: + description: Zones is the list of zones + items: + description: StretchClusterZoneSpec represents the specification of a stretched zone in a Ceph Cluster + properties: + arbiter: + description: Arbiter determines if the zone contains the arbiter + type: boolean + name: + description: Name is the name of the zone + type: string + volumeClaimTemplate: + description: VolumeClaimTemplate is the PVC template + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying volume. + type: object + conditions: + description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. 
+ items: + description: PersistentVolumeClaimCondition contails details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + nullable: true + type: array + type: object + volumeClaimTemplate: + description: VolumeClaimTemplate is the PVC definition + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying volume. + type: object + conditions: + description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. 
+ items: + description: PersistentVolumeClaimCondition contails details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + monitoring: + description: Prometheus based Monitoring settings + nullable: true + properties: + enabled: + description: Enabled determines whether to create the prometheus rules for the ceph cluster. If true, the prometheus types must exist or the creation will fail. + type: boolean + externalMgrEndpoints: + description: ExternalMgrEndpoints points to an existing Ceph prometheus exporter endpoint + items: + description: EndpointAddress is a tuple that describes single IP address. + properties: + hostname: + description: The Hostname of this endpoint + type: string + ip: + description: 'The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. 
TODO: This should allow hostname or IP, See #4447.' + type: string + nodeName: + description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.' + type: string + targetRef: + description: Reference to object providing the endpoint. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - ip + type: object + nullable: true + type: array + externalMgrPrometheusPort: + description: ExternalMgrPrometheusPort Prometheus exporter port + maximum: 65535 + minimum: 0 + type: integer + rulesNamespace: + description: RulesNamespace is the namespace where the prometheus rules and alerts should be created. If empty, the same namespace as the cluster will be used. + type: string + type: object + network: + description: Network related configuration + nullable: true + properties: + dualStack: + description: DualStack determines whether Ceph daemons should listen on both IPv4 and IPv6 + type: boolean + hostNetwork: + description: HostNetwork to enable host network + type: boolean + ipFamily: + default: IPv4 + description: IPFamily is the single stack IPv6 or IPv4 protocol + enum: + - IPv4 + - IPv6 + nullable: true + type: string + provider: + description: Provider is what provides network connectivity to the cluster e.g. "host" or "multus" + nullable: true + type: string + selectors: + additionalProperties: + type: string + description: Selectors string values describe what networks will be used to connect the cluster. Meanwhile the keys describe each network respective responsibilities or any metadata storage provider decide. + nullable: true + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + placement: + additionalProperties: + description: Placement is the placement for an object + properties: + nodeAffinity: + description: NodeAffinity is a group of node affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: PodAffinity is a group of inter pod affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + tolerations: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. 
A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + description: The placement-related configuration to pass to kubernetes (affinity, node selector, tolerations). + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + priorityClassNames: + additionalProperties: + type: string + description: PriorityClassNames sets priority classes on components + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + removeOSDsIfOutAndSafeToRemove: + description: Remove the OSD that is out and safe to remove only if this option is true + type: boolean + resources: + additionalProperties: + description: ResourceRequirements describes the compute resource requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + description: Resources set resource requests and limits + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + security: + description: Security represents security settings + nullable: true + properties: + kms: + description: KeyManagementService is the main Key Management option + nullable: true + properties: + connectionDetails: + additionalProperties: + type: string + description: ConnectionDetails contains the KMS connection details (address, port etc) + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + tokenSecretName: + description: TokenSecretName is the kubernetes secret containing the KMS token + type: string + type: object + type: object + skipUpgradeChecks: + description: SkipUpgradeChecks defines if an upgrade should be forced even if one of the check fails + type: boolean + storage: + description: A spec for available storage in the cluster and how it should be used + nullable: true + properties: + config: + additionalProperties: + type: string + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + deviceFilter: + description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster + type: string + devicePathFilter: + description: A regular expression to 
allow more fine-grained selection of devices with path names + type: string + devices: + description: List of devices to use as storage devices + items: + description: Device represents a disk to use in the cluster + properties: + config: + additionalProperties: + type: string + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + fullpath: + type: string + name: + type: string + type: object + nullable: true + type: array + x-kubernetes-preserve-unknown-fields: true + nodes: + items: + description: Node is a storage nodes + properties: + config: + additionalProperties: + type: string + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + deviceFilter: + description: A regular expression to allow more fine-grained selection of devices on nodes across the cluster + type: string + devicePathFilter: + description: A regular expression to allow more fine-grained selection of devices with path names + type: string + devices: + description: List of devices to use as storage devices + items: + description: Device represents a disk to use in the cluster + properties: + config: + additionalProperties: + type: string + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + fullpath: + type: string + name: + type: string + type: object + nullable: true + type: array + x-kubernetes-preserve-unknown-fields: true + name: + type: string + resources: + description: ResourceRequirements describes the compute resource requirements. + nullable: true + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + useAllDevices: + description: Whether to consume all the storage devices found on a machine + type: boolean + volumeClaimTemplates: + description: PersistentVolumeClaims to use as storage + items: + description: PersistentVolumeClaim is a user's request for and claim to a persistent volume + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying volume. + type: object + conditions: + description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. 
+ items: + description: PersistentVolumeClaimCondition contails details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: array + type: object + nullable: true + type: array + onlyApplyOSDPlacement: + type: boolean + storageClassDeviceSets: + items: + description: StorageClassDeviceSet is a storage class device set + properties: + config: + additionalProperties: + type: string + description: Provider-specific device configuration + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + count: + description: Count is the number of devices in this set + minimum: 1 + type: integer + encrypted: + description: Whether to encrypt the deviceSet + type: boolean + name: + description: Name is a unique identifier for the set + type: string + placement: + description: Placement is the placement for an object + nullable: true + properties: + nodeAffinity: + description: NodeAffinity is a group of node affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule 
pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. 
+ items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. 
+ items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: PodAffinity is a group of inter pod affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + tolerations: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. 
A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + portable: + description: Portable represents OSD portability across the hosts + type: boolean + preparePlacement: + description: Placement is the placement for an object + nullable: true + properties: + nodeAffinity: + description: NodeAffinity is a group of node affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
+ properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: PodAffinity is a group of inter pod affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + tolerations: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. 
+ format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + resources: + description: ResourceRequirements describes the compute resource requirements. + nullable: true + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + schedulerName: + description: Scheduler name for OSD pod placement + type: string + tuneDeviceClass: + description: TuneSlowDeviceClass Tune the OSD when running on a slow Device Class + type: boolean + tuneFastDeviceClass: + description: TuneFastDeviceClass Tune the OSD when running on a fast Device Class + type: boolean + volumeClaimTemplates: + description: VolumeClaimTemplates is a list of PVC templates for the underlying storage devices + items: + description: PersistentVolumeClaim is a user's request for and claim to a persistent volume + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of a volume requested by a pod author. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying volume. + type: object + conditions: + description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about last transition. 
+ type: string + reason: + description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: array + required: + - count + - name + - volumeClaimTemplates + type: object + nullable: true + type: array + useAllDevices: + description: Whether to consume all the storage devices found on a machine + type: boolean + useAllNodes: + type: boolean + volumeClaimTemplates: + description: PersistentVolumeClaims to use as storage + items: + description: PersistentVolumeClaim is a user's request for and claim to a persistent volume + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying volume. + type: object + conditions: + description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. 
+ items: + description: PersistentVolumeClaimCondition contails details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: array + type: object + waitTimeoutForHealthyOSDInMinutes: + description: WaitTimeoutForHealthyOSDInMinutes defines the time the operator would wait before an OSD can be stopped for upgrade or restart. If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. The default wait timeout is 10 minutes. 
+ format: int64 + type: integer + type: object + status: + description: ClusterStatus represents the status of a Ceph cluster + nullable: true + properties: + ceph: + description: CephStatus is the details health of a Ceph Cluster + properties: + capacity: + description: Capacity is the capacity information of a Ceph Cluster + properties: + bytesAvailable: + format: int64 + type: integer + bytesTotal: + format: int64 + type: integer + bytesUsed: + format: int64 + type: integer + lastUpdated: + type: string + type: object + details: + additionalProperties: + description: CephHealthMessage represents the health message of a Ceph Cluster + properties: + message: + type: string + severity: + type: string + required: + - message + - severity + type: object + type: object + health: + type: string + lastChanged: + type: string + lastChecked: + type: string + previousHealth: + type: string + versions: + description: CephDaemonsVersions show the current ceph version for different ceph daemons + properties: + cephfs-mirror: + additionalProperties: + type: integer + description: CephFSMirror shows CephFSMirror Ceph version + type: object + mds: + additionalProperties: + type: integer + description: Mds shows Mds Ceph version + type: object + mgr: + additionalProperties: + type: integer + description: Mgr shows Mgr Ceph version + type: object + mon: + additionalProperties: + type: integer + description: Mon shows Mon Ceph version + type: object + osd: + additionalProperties: + type: integer + description: Osd shows Osd Ceph version + type: object + overall: + additionalProperties: + type: integer + description: Overall shows overall Ceph version + type: object + rbd-mirror: + additionalProperties: + type: integer + description: RbdMirror shows RbdMirror Ceph version + type: object + rgw: + additionalProperties: + type: integer + description: Rgw shows Rgw Ceph version + type: object + type: object + type: object + conditions: + items: + description: Condition represents a 
status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + message: + type: string + phase: + description: ConditionType represent a resource's status + type: string + state: + description: ClusterState represents the state of a Ceph Cluster + type: string + storage: + description: CephStorage represents flavors of Ceph Cluster Storage + properties: + deviceClasses: + items: + description: DeviceClasses represents device classes of a Ceph Cluster + properties: + name: + type: string + type: object + type: array + type: object + version: + description: ClusterVersion represents the version of a Ceph Cluster + properties: + image: + type: string + version: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephfilesystemmirrors.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephFilesystemMirror + listKind: CephFilesystemMirrorList + plural: cephfilesystemmirrors + singular: cephfilesystemmirror + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephFilesystemMirror is the Ceph Filesystem Mirror object definition + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation 
of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FilesystemMirroringSpec is the filesystem mirroring specification + properties: + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + placement: + description: The affinity to place the rgw pods (default is to place on any available node) + nullable: true + properties: + nodeAffinity: + description: NodeAffinity is a group of node affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. 
+ items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: PodAffinity is a group of inter pod affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + tolerations: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. 
+ format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + priorityClassName: + description: PriorityClassName sets priority class on the cephfs-mirror pods + type: string + resources: + description: The resource requirements for the cephfs-mirror pods + nullable: true + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + status: + description: Status represents the status of an object + properties: + phase: + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephfilesystems.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephFilesystem + listKind: CephFilesystemList + plural: cephfilesystems + singular: cephfilesystem + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Number of desired active MDS daemons + jsonPath: .spec.metadataServer.activeCount + name: ActiveMDS + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.phase + name: Phase + type: string + name: v1 + schema: + openAPIV3Schema: + description: CephFilesystem represents a Ceph Filesystem + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FilesystemSpec represents the spec of a file system + properties: + dataPools: + description: The data pool settings + items: + description: PoolSpec represents the spec of ceph pool + properties: + compressionMode: + default: none + description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers 
spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass 
+ type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + nullable: true + type: array + metadataPool: + description: The metadata pool settings + nullable: true + properties: + compressionMode: + default: none + description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + 
description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the 
name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + metadataServer: + description: The mds pod info + properties: + activeCount: + description: The number of metadata servers that are active. The remaining servers in the cluster will be in standby mode. + format: int32 + maximum: 10 + minimum: 1 + type: integer + activeStandby: + description: Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. If false, standbys will still be available, but will not have a warm metadata cache. + type: boolean + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. 
+ nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + placement: + description: The affinity to place the mds pods (default is to place on all available node) with a daemonset + nullable: true + properties: + nodeAffinity: + description: NodeAffinity is a group of node affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: PodAffinity is a group of inter pod affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + tolerations: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. 
A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + description: PriorityClassName sets priority classes on components + type: string + resources: + description: The resource requirements for the rgw pods + nullable: true + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - activeCount + type: object + mirroring: + description: The mirroring settings + nullable: true + properties: + enabled: + description: Enabled whether this filesystem is mirrored or not + type: boolean + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotRetention: + description: Retention is the retention policy for a snapshot schedule One path has exactly one retention policy. A policy can however contain multiple count-time period pairs in order to specify complex retention policies + items: + description: SnapshotScheduleRetentionSpec is a retention policy + properties: + duration: + description: Duration represents the retention duration for a snapshot + type: string + path: + description: Path is the path to snapshot + type: string + type: object + type: array + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored filesystems + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + preserveFilesystemOnDelete: + description: Preserve the fs in the cluster on CephFilesystem CR deletion. Setting this to true automatically implies PreservePoolsOnDelete is true. 
+ type: boolean + preservePoolsOnDelete: + description: Preserve pools on filesystem deletion + type: boolean + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - dataPools + - metadataPool + - metadataServer + type: object + status: + description: CephFilesystemStatus represents the status of a Ceph Filesystem + properties: + info: + additionalProperties: + type: string + description: Use only info and put mirroringStatus in it? + nullable: true + type: object + mirroringStatus: + description: MirroringStatus is the filesystem mirroring status + properties: + daemonsStatus: + description: PoolMirroringStatus is the mirroring status of a filesystem + items: + description: FilesystemMirrorInfoSpec is the filesystem mirror status of a given filesystem + properties: + daemon_id: + description: DaemonID is the cephfs-mirror name + type: integer + filesystems: + description: Filesystems is the list of filesystems managed by a given cephfs-mirror daemon + items: + description: FilesystemsSpec is spec for the mirrored filesystem + properties: + directory_count: + description: DirectoryCount is the number of directories in the filesystem + type: integer + filesystem_id: + description: FilesystemID is the filesystem identifier + type: integer + name: + description: Name is name of the filesystem + type: string + peers: + description: Peers represents the mirroring peers + items: + description: FilesystemMirrorInfoPeerSpec is the specification of a filesystem peer mirror + properties: + remote: + description: Remote are the remote cluster information + 
properties: + client_name: + description: ClientName is cephx name + type: string + cluster_name: + description: ClusterName is the name of the cluster + type: string + fs_name: + description: FsName is the filesystem name + type: string + type: object + stats: + description: Stats are the stat a peer mirror + properties: + failure_count: + description: FailureCount is the number of mirroring failure + type: integer + recovery_count: + description: RecoveryCount is the number of recovery attempted after failures + type: integer + type: object + uuid: + description: UUID is the peer unique identifier + type: string + type: object + type: array + type: object + type: array + type: object + nullable: true + type: array + details: + description: Details contains potential status errors + type: string + lastChanged: + description: LastChanged is the last time time the status last changed + type: string + lastChecked: + description: LastChecked is the last time time the status was checked + type: string + type: object + phase: + description: ConditionType represent a resource's status + type: string + snapshotScheduleStatus: + description: FilesystemSnapshotScheduleStatusSpec is the status of the snapshot schedule + properties: + details: + description: Details contains potential status errors + type: string + lastChanged: + description: LastChanged is the last time time the status last changed + type: string + lastChecked: + description: LastChecked is the last time time the status was checked + type: string + snapshotSchedules: + description: SnapshotSchedules is the list of snapshots scheduled + items: + description: FilesystemSnapshotSchedulesSpec is the list of snapshot scheduled for images in a pool + properties: + fs: + description: Fs is the name of the Ceph Filesystem + type: string + path: + description: Path is the path on the filesystem + type: string + rel_path: + type: string + retention: + description: FilesystemSnapshotScheduleStatusRetention is the 
retention specification for a filesystem snapshot schedule + properties: + active: + description: Active is whether the scheduled is active or not + type: boolean + created: + description: Created is when the snapshot schedule was created + type: string + created_count: + description: CreatedCount is total amount of snapshots + type: integer + first: + description: First is when the first snapshot schedule was taken + type: string + last: + description: Last is when the last snapshot schedule was taken + type: string + last_pruned: + description: LastPruned is when the last snapshot schedule was pruned + type: string + pruned_count: + description: PrunedCount is total amount of pruned snapshots + type: integer + start: + description: Start is when the snapshot schedule starts + type: string + type: object + schedule: + type: string + subvol: + description: Subvol is the name of the sub volume + type: string + type: object + nullable: true + type: array + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephnfses.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephNFS + listKind: CephNFSList + plural: cephnfses + shortNames: + - nfs + singular: cephnfs + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephNFS represents a Ceph NFS + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NFSGaneshaSpec represents the spec of an nfs ganesha server + properties: + rados: + description: RADOS is the Ganesha RADOS specification + properties: + namespace: + description: Namespace is the RADOS namespace where NFS client recovery data is stored. + type: string + pool: + description: Pool is the RADOS pool where NFS client recovery data is stored. + type: string + required: + - namespace + - pool + type: object + server: + description: Server is the Ganesha Server specification + properties: + active: + description: The number of active Ganesha servers + type: integer + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. 
+ nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + logLevel: + description: LogLevel set logging level + type: string + placement: + description: The affinity to place the ganesha pods + nullable: true + properties: + nodeAffinity: + description: NodeAffinity is a group of node affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: PodAffinity is a group of inter pod affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + tolerations: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. 
A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + description: PriorityClassName sets the priority class on the pods + type: string + resources: + description: Resources set resource requests and limits + nullable: true + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - active + type: object + required: + - rados + - server + type: object + status: + description: Status represents the status of an object + properties: + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephobjectrealms.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectRealm + listKind: CephObjectRealmList + plural: cephobjectrealms + singular: cephobjectrealm + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephObjectRealm represents a Ceph Object Store Gateway Realm + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObjectRealmSpec represent the spec of an ObjectRealm + nullable: true + properties: + pull: + description: PullSpec represents the pulling specification of a Ceph Object Storage Gateway Realm + properties: + endpoint: + type: string + required: + - endpoint + type: object + required: + - pull + type: object + status: + description: Status represents the status of an object + properties: + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephobjectstores.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectStore + listKind: CephObjectStoreList + plural: cephobjectstores + singular: cephobjectstore + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephObjectStore represents a Ceph Object Store Gateway + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObjectStoreSpec represent the spec of a pool + properties: + dataPool: + description: The data pool settings + nullable: true + properties: + compressionMode: + default: none + description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + 
description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + 
description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + gateway: + description: The rgw pod info + nullable: true + properties: + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + caBundleRef: + description: The name of the secret that stores custom ca-bundle with root and intermediate certificates. + nullable: true + type: string + externalRgwEndpoints: + description: ExternalRgwEndpoints points to external rgw endpoint(s) + items: + description: EndpointAddress is a tuple that describes single IP address. + properties: + hostname: + description: The Hostname of this endpoint + type: string + ip: + description: 'The IP of this endpoint. 
May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready. TODO: This should allow hostname or IP, See #4447.' + type: string + nodeName: + description: 'Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.' + type: string + targetRef: + description: Reference to object providing the endpoint. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - ip + type: object + nullable: true + type: array + instances: + description: The number of pods in the rgw replicaset. + format: int32 + nullable: true + type: integer + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + placement: + description: The affinity to place the rgw pods (default is to place on any available node) + nullable: true + properties: + nodeAffinity: + description: NodeAffinity is a group of node affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. 
+ properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: PodAffinity is a group of inter pod affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + tolerations: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. 
+ format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + port: + description: The port the rgw service will be listening on (http) + format: int32 + type: integer + priorityClassName: + description: PriorityClassName sets priority classes on the rgw pods + type: string + resources: + description: The resource requirements for the rgw pods + nullable: true + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + securePort: + description: The port the rgw service will be listening on (https) + format: int32 + maximum: 65535 + minimum: 0 + nullable: true + type: integer + service: + description: The configuration related to add/set on each rgw service. + nullable: true + properties: + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each rgw service. nullable optional + type: object + type: object + sslCertificateRef: + description: The name of the secret that stores the ssl certificate for secure rgw connections + nullable: true + type: string + type: object + healthCheck: + description: The rgw Bucket healthchecks and liveness probe + nullable: true + properties: + bucket: + description: HealthCheckSpec represents the health check of an object store bucket + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + livenessProbe: + description: ProbeSpec is a wrapper around Probe so it can be enabled or disabled for a Ceph daemon + properties: + disabled: + description: Disabled determines whether probe is disable or not + type: boolean + probe: + description: Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic. + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + type: object + type: object + metadataPool: + description: The metadata pool settings + nullable: true + properties: + compressionMode: + default: none + description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret 
names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the 
specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + preservePoolsOnDelete: + description: Preserve pools on object store deletion + type: boolean + security: + description: Security represents security settings + nullable: true + properties: + kms: + description: KeyManagementService is the main Key Management option + nullable: true + properties: + connectionDetails: + additionalProperties: + type: string + description: ConnectionDetails contains the KMS connection details (address, port etc) + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + tokenSecretName: + description: TokenSecretName is the kubernetes secret containing the KMS token + type: string + type: object + type: object + zone: + description: The multisite info + nullable: true + properties: + name: + description: RGW Zone the Object Store is in + type: string + required: + - name + type: object + type: object + status: + 
description: ObjectStoreStatus represents the status of a Ceph Object Store resource + properties: + bucketStatus: + description: BucketStatus represents the status of a bucket + properties: + details: + type: string + health: + description: ConditionType represent a resource's status + type: string + lastChanged: + type: string + lastChecked: + type: string + type: object + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array + info: + additionalProperties: + type: string + nullable: true + type: object + message: + type: string + phase: + description: ConditionType represent a resource's status + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephobjectstoreusers.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectStoreUser + listKind: CephObjectStoreUserList + plural: cephobjectstoreusers + shortNames: + - rcou + - objectuser + singular: cephobjectstoreuser + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephObjectStoreUser represents a Ceph Object Store Gateway User + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this 
representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObjectStoreUserSpec represent the spec of an Objectstoreuser + properties: + displayName: + description: The display name for the ceph users + type: string + store: + description: The store the user will be created in + type: string + type: object + status: + description: ObjectStoreUserStatus represents the status Ceph Object Store Gateway User + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephobjectzonegroups.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectZoneGroup + listKind: CephObjectZoneGroupList + plural: cephobjectzonegroups + singular: cephobjectzonegroup + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephObjectZoneGroup represents a Ceph Object Store Gateway Zone Group + properties: + apiVersion: + description: 'APIVersion defines the 
versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObjectZoneGroupSpec represent the spec of an ObjectZoneGroup + properties: + realm: + description: The display name for the ceph users + type: string + required: + - realm + type: object + status: + description: Status represents the status of an object + properties: + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephobjectzones.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectZone + listKind: CephObjectZoneList + plural: cephobjectzones + singular: cephobjectzone + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephObjectZone represents a Ceph Object Store Gateway Zone + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObjectZoneSpec represent the spec of an ObjectZone + properties: + dataPool: + description: The data pool settings + nullable: true + properties: + compressionMode: + default: none + description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + 
description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. + type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example 
SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + metadataPool: + description: The metadata pool settings + nullable: true + properties: + compressionMode: + default: none + description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The 
device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the 
name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + zoneGroup: + description: The display name for the ceph users + type: string + required: + - dataPool + - metadataPool + - zoneGroup + type: object + status: + description: Status represents the status of an object + properties: + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephrbdmirrors.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephRBDMirror + listKind: CephRBDMirrorList + plural: cephrbdmirrors + singular: cephrbdmirror + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephRBDMirror represents a Ceph RBD Mirror + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RBDMirroringSpec represents the specification of an RBD mirror daemon + properties: + annotations: + additionalProperties: + type: string + description: The annotations-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + count: + description: Count represents the number of rbd mirror instance to run + minimum: 1 + type: integer + labels: + additionalProperties: + type: string + description: The labels-related configuration to add/set on each Pod related object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + placement: + description: The affinity to place the rgw pods (default is to place on any available node) + nullable: true + properties: + nodeAffinity: + description: NodeAffinity is a group of node affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: PodAffinity is a group of inter pod affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: PodAntiAffinity is a group of inter pod anti affinity scheduling rules + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + tolerations: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. 
A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assigment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + description: PriorityClassName sets priority class on the rbd mirror pods + type: string + resources: + description: The resource requirements for the rbd mirror pods + nullable: true + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - count + type: object + status: + description: Status represents the status of an object + properties: + phase: + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: objectbucketclaims.objectbucket.io +spec: + group: objectbucket.io + names: + kind: ObjectBucketClaim + listKind: ObjectBucketClaimList + plural: objectbucketclaims + singular: objectbucketclaim + shortNames: + - obc + - obcs + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + storageClassName: + type: string + bucketName: + type: string + generateBucketName: + type: string + additionalConfig: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + objectBucketName: + type: string + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: objectbuckets.objectbucket.io +spec: + group: objectbucket.io + names: + kind: ObjectBucket + listKind: ObjectBucketList + plural: objectbuckets + singular: objectbucket + shortNames: + - ob + - obs + scope: Cluster + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + storageClassName: + type: string + endpoint: + type: object + nullable: true + properties: + bucketHost: + type: string + bucketPort: + 
type: integer + format: int32 + bucketName: + type: string + region: + type: string + subRegion: + type: string + additionalConfig: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + authentication: + type: object + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + additionalState: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + reclaimPolicy: + type: string + claimRef: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: volumereplicationclasses.replication.storage.openshift.io +spec: + group: replication.storage.openshift.io + names: + kind: VolumeReplicationClass + listKind: VolumeReplicationClassList + plural: volumereplicationclasses + shortNames: + - vrc + singular: volumereplicationclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.provisioner + name: provisioner + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: VolumeReplicationClass is the Schema for the volumereplicationclasses API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VolumeReplicationClassSpec specifies parameters that an underlying storage system uses when creating a volume replica. A specific VolumeReplicationClass is used by specifying its name in a VolumeReplication object. + properties: + parameters: + additionalProperties: + type: string + description: Parameters is a key-value map with storage provisioner specific configurations for creating volume replicas + type: object + provisioner: + description: Provisioner is the name of storage provisioner + type: string + required: + - provisioner + type: object + status: + description: VolumeReplicationClassStatus defines the observed state of VolumeReplicationClass + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: volumereplications.replication.storage.openshift.io +spec: + group: replication.storage.openshift.io + names: + kind: VolumeReplication + listKind: VolumeReplicationList + plural: volumereplications + shortNames: + - vr + singular: volumereplication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.volumeReplicationClass + name: volumeReplicationClass + type: string + - jsonPath: .spec.dataSource.name + name: pvcName + type: string + - jsonPath: .spec.replicationState + name: desiredState + type: string + - jsonPath: .status.state + name: currentState + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: VolumeReplication is the Schema for the 
volumereplications API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VolumeReplicationSpec defines the desired state of VolumeReplication + properties: + dataSource: + description: DataSource represents the object associated with the volume + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + replicationState: + description: ReplicationState represents the replication operation to be performed on the volume. 
Supported operations are "primary", "secondary" and "resync" + enum: + - primary + - secondary + - resync + type: string + volumeReplicationClass: + description: VolumeReplicationClass is the VolumeReplicationClass name for this VolumeReplication resource + type: string + required: + - dataSource + - replicationState + - volumeReplicationClass + type: object + status: + description: VolumeReplicationStatus defines the observed state of VolumeReplication + properties: + conditions: + description: Conditions are the list of conditions and their status. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastCompletionTime: + format: date-time + type: string + lastStartTime: + format: date-time + type: string + message: + type: string + observedGeneration: + description: observedGeneration is the last generation change the operator has dealt with + format: int64 + type: integer + state: + description: State captures the latest state of the replication operation + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: 
null + name: volumes.rook.io +spec: + group: rook.io + names: + kind: Volume + listKind: VolumeList + plural: volumes + shortNames: + - rv + singular: volume + scope: Namespaced + versions: + - name: v1alpha2 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + attachments: + items: + properties: + clusterName: + type: string + mountDir: + type: string + node: + type: string + podName: + type: string + podNamespace: + type: string + readOnly: + type: boolean + required: + - clusterName + - mountDir + - node + - podName + - podNamespace + - readOnly + type: object + type: array + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + required: + - attachments + - metadata + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/ii/community-infra/rook-ceph-dashboard.yaml b/ii/community-infra/rook-ceph-dashboard.yaml new file mode 100644 index 0000000..4b94a37 --- /dev/null +++ b/ii/community-infra/rook-ceph-dashboard.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: rook-ceph-mgr-dashboard-external-https + namespace: rook-ceph + labels: + app: rook-ceph-mgr + rook_cluster: rook-ceph +spec: + ports: + - name: dashboard + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + app: rook-ceph-mgr + rook_cluster: rook-ceph + sessionAffinity: None + type: NodePort diff --git a/ii/community-infra/rook-ceph-operator.yaml b/ii/community-infra/rook-ceph-operator.yaml new file mode 100644 index 0000000..ffcb8d3 --- /dev/null +++ b/ii/community-infra/rook-ceph-operator.yaml @@ -0,0 +1,525 @@ +################################################################################################################# +# The deployment for the rook operator +# Contains the common settings for most Kubernetes deployments. +# For example, to create the rook-ceph cluster: +# kubectl create -f crds.yaml -f common.yaml -f operator.yaml +# kubectl create -f cluster.yaml +# +# Also see other operator sample files for variations of operator.yaml: +# - operator-openshift.yaml: Common settings for running in OpenShift +############################################################################################################### + +# Rook Ceph Operator Config ConfigMap +# Use this ConfigMap to override Rook-Ceph Operator configurations. +# NOTE! Precedence will be given to this config if the same Env Var config also exists in the +# Operator Deployment. 
+# To move a configuration(s) from the Operator Deployment to this ConfigMap, add the config +# here. It is recommended to then remove it from the Deployment to eliminate any future confusion. +kind: ConfigMap +apiVersion: v1 +metadata: + name: rook-ceph-operator-config + # should be in the namespace of the operator + namespace: rook-ceph # namespace:operator +data: + # The logging level for the operator: INFO | DEBUG + ROOK_LOG_LEVEL: "INFO" + + # Enable the CSI driver. + # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml + ROOK_CSI_ENABLE_CEPHFS: "true" + # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below. + ROOK_CSI_ENABLE_RBD: "true" + ROOK_CSI_ENABLE_GRPC_METRICS: "false" + + # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary + # in some network configurations where the SDN does not provide access to an external cluster or + # there is significant drop in read/write performance. + # CSI_ENABLE_HOST_NETWORK: "true" + + # Set logging level for csi containers. + # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. + # CSI_LOG_LEVEL: "0" + + # OMAP generator will generate the omap mapping between the PV name and the RBD image. + # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature. + # By default OMAP generator sidecar is deployed with CSI provisioner pod, to disable + # it set it to false. + # CSI_ENABLE_OMAP_GENERATOR: "false" + + # set to false to disable deployment of snapshotter container in CephFS provisioner pod. + CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true" + + # set to false to disable deployment of snapshotter container in RBD provisioner pod. + CSI_ENABLE_RBD_SNAPSHOTTER: "true" + + # Enable cephfs kernel driver instead of ceph-fuse. 
+ # If you disable the kernel client, your application may be disrupted during upgrade. + # See the upgrade guide: https://rook.io/docs/rook/master/ceph-upgrade.html + # NOTE! cephfs quota is not supported in kernel version < 4.17 + CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true" + + # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted. + # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html + CSI_RBD_FSGROUPPOLICY: "ReadWriteOnceWithFSType" + + # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. + # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html + CSI_CEPHFS_FSGROUPPOLICY: "None" + + # (Optional) Allow starting unsupported ceph-csi image + ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false" + # The default version of CSI supported by Rook will be started. To change the version + # of the CSI driver to something other than what is officially supported, change + # these images to the desired release of the CSI driver. + # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0" + # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0" + # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" + # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2" + # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1" + # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.2.1" + + # (Optional) set user created priorityclassName for csi plugin pods. + # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical" + + # (Optional) set user created priorityclassName for csi provisioner pods. + # CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical" + + # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. + # Default value is RollingUpdate. 
+ # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete" + # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. + # Default value is RollingUpdate. + # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete" + + # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path. + # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet" + + # Labels to add to the CSI CephFS Deployments and DaemonSets Pods. + # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2" + # Labels to add to the CSI RBD Deployments and DaemonSets Pods. + # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2" + + # (Optional) CephCSI provisioner NodeAffinity(applied to both CephFS and RBD provisioner). + # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" + # (Optional) CephCSI provisioner tolerations list(applied to both CephFS and RBD provisioner). + # Put here list of taints you want to tolerate in YAML format. + # CSI provisioner would be best to start on the same nodes as other ceph daemons. + # CSI_PROVISIONER_TOLERATIONS: | + # - effect: NoSchedule + # key: node-role.kubernetes.io/controlplane + # operator: Exists + # - effect: NoExecute + # key: node-role.kubernetes.io/etcd + # operator: Exists + # (Optional) CephCSI plugin NodeAffinity(applied to both CephFS and RBD plugin). + # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" + # (Optional) CephCSI plugin tolerations list(applied to both CephFS and RBD plugin). + # Put here list of taints you want to tolerate in YAML format. + # CSI plugins need to be started on all the nodes where the clients need to mount the storage. + # CSI_PLUGIN_TOLERATIONS: | + # - effect: NoSchedule + # key: node-role.kubernetes.io/controlplane + # operator: Exists + # - effect: NoExecute + # key: node-role.kubernetes.io/etcd + # operator: Exists + + # (Optional) CephCSI RBD provisioner NodeAffinity(if specified, overrides CSI_PROVISIONER_NODE_AFFINITY). 
+ # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node" + # (Optional) CephCSI RBD provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS). + # Put here list of taints you want to tolerate in YAML format. + # CSI provisioner would be best to start on the same nodes as other ceph daemons. + # CSI_RBD_PROVISIONER_TOLERATIONS: | + # - key: node.rook.io/rbd + # operator: Exists + # (Optional) CephCSI RBD plugin NodeAffinity(if specified, overrides CSI_PLUGIN_NODE_AFFINITY). + # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node" + # (Optional) CephCSI RBD plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS). + # Put here list of taints you want to tolerate in YAML format. + # CSI plugins need to be started on all the nodes where the clients need to mount the storage. + # CSI_RBD_PLUGIN_TOLERATIONS: | + # - key: node.rook.io/rbd + # operator: Exists + + # (Optional) CephCSI CephFS provisioner NodeAffinity(if specified, overrides CSI_PROVISIONER_NODE_AFFINITY). + # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node" + # (Optional) CephCSI CephFS provisioner tolerations list(if specified, overrides CSI_PROVISIONER_TOLERATIONS). + # Put here list of taints you want to tolerate in YAML format. + # CSI provisioner would be best to start on the same nodes as other ceph daemons. + # CSI_CEPHFS_PROVISIONER_TOLERATIONS: | + # - key: node.rook.io/cephfs + # operator: Exists + # (Optional) CephCSI CephFS plugin NodeAffinity(if specified, overrides CSI_PLUGIN_NODE_AFFINITY). + # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node" + # (Optional) CephCSI CephFS plugin tolerations list(if specified, overrides CSI_PLUGIN_TOLERATIONS). + # Put here list of taints you want to tolerate in YAML format. + # CSI plugins need to be started on all the nodes where the clients need to mount the storage. 
+ # CSI_CEPHFS_PLUGIN_TOLERATIONS: | + # - key: node.rook.io/cephfs + # operator: Exists + + # (Optional) CEPH CSI RBD provisioner resource requirement list, Put here list of resource + # requests and limits you want to apply for provisioner pod + # CSI_RBD_PROVISIONER_RESOURCE: | + # - name : csi-provisioner + # resource: + # requests: + # memory: 128Mi + # cpu: 100m + # limits: + # memory: 256Mi + # cpu: 200m + # - name : csi-resizer + # resource: + # requests: + # memory: 128Mi + # cpu: 100m + # limits: + # memory: 256Mi + # cpu: 200m + # - name : csi-attacher + # resource: + # requests: + # memory: 128Mi + # cpu: 100m + # limits: + # memory: 256Mi + # cpu: 200m + # - name : csi-snapshotter + # resource: + # requests: + # memory: 128Mi + # cpu: 100m + # limits: + # memory: 256Mi + # cpu: 200m + # - name : csi-rbdplugin + # resource: + # requests: + # memory: 512Mi + # cpu: 250m + # limits: + # memory: 1Gi + # cpu: 500m + # - name : liveness-prometheus + # resource: + # requests: + # memory: 128Mi + # cpu: 50m + # limits: + # memory: 256Mi + # cpu: 100m + # (Optional) CEPH CSI RBD plugin resource requirement list, Put here list of resource + # requests and limits you want to apply for plugin pod + # CSI_RBD_PLUGIN_RESOURCE: | + # - name : driver-registrar + # resource: + # requests: + # memory: 128Mi + # cpu: 50m + # limits: + # memory: 256Mi + # cpu: 100m + # - name : csi-rbdplugin + # resource: + # requests: + # memory: 512Mi + # cpu: 250m + # limits: + # memory: 1Gi + # cpu: 500m + # - name : liveness-prometheus + # resource: + # requests: + # memory: 128Mi + # cpu: 50m + # limits: + # memory: 256Mi + # cpu: 100m + # (Optional) CEPH CSI CephFS provisioner resource requirement list, Put here list of resource + # requests and limits you want to apply for provisioner pod + # CSI_CEPHFS_PROVISIONER_RESOURCE: | + # - name : csi-provisioner + # resource: + # requests: + # memory: 128Mi + # cpu: 100m + # limits: + # memory: 256Mi + # cpu: 200m + # - name : 
csi-resizer + # resource: + # requests: + # memory: 128Mi + # cpu: 100m + # limits: + # memory: 256Mi + # cpu: 200m + # - name : csi-attacher + # resource: + # requests: + # memory: 128Mi + # cpu: 100m + # limits: + # memory: 256Mi + # cpu: 200m + # - name : csi-cephfsplugin + # resource: + # requests: + # memory: 512Mi + # cpu: 250m + # limits: + # memory: 1Gi + # cpu: 500m + # - name : liveness-prometheus + # resource: + # requests: + # memory: 128Mi + # cpu: 50m + # limits: + # memory: 256Mi + # cpu: 100m + # (Optional) CEPH CSI CephFS plugin resource requirement list, Put here list of resource + # requests and limits you want to apply for plugin pod + # CSI_CEPHFS_PLUGIN_RESOURCE: | + # - name : driver-registrar + # resource: + # requests: + # memory: 128Mi + # cpu: 50m + # limits: + # memory: 256Mi + # cpu: 100m + # - name : csi-cephfsplugin + # resource: + # requests: + # memory: 512Mi + # cpu: 250m + # limits: + # memory: 1Gi + # cpu: 500m + # - name : liveness-prometheus + # resource: + # requests: + # memory: 128Mi + # cpu: 50m + # limits: + # memory: 256Mi + # cpu: 100m + + # Configure CSI CSI Ceph FS grpc and liveness metrics port + # CSI_CEPHFS_GRPC_METRICS_PORT: "9091" + # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081" + # Configure CSI RBD grpc and liveness metrics port + # CSI_RBD_GRPC_METRICS_PORT: "9090" + # CSI_RBD_LIVENESS_METRICS_PORT: "9080" + + # Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used + ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true" + + # Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release + # in favor of the CSI driver. + ROOK_ENABLE_FLEX_DRIVER: "false" + # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster. + # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs. 
+ ROOK_ENABLE_DISCOVERY_DAEMON: "false" + # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is an invalid value, it's default to 15. + ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15" + # Enable volume replication controller + CSI_ENABLE_VOLUME_REPLICATION: "false" + # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0" + + # (Optional) Admission controller NodeAffinity. + # ADMISSION_CONTROLLER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph" + # (Optional) Admission controller tolerations list. Put here list of taints you want to tolerate in YAML format. + # Admission controller would be best to start on the same nodes as other ceph daemons. + # ADMISSION_CONTROLLER_TOLERATIONS: | + # - effect: NoSchedule + # key: node-role.kubernetes.io/controlplane + # operator: Exists + # - effect: NoExecute + # key: node-role.kubernetes.io/etcd + # operator: Exists +--- +# OLM: BEGIN OPERATOR DEPLOYMENT +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rook-ceph-operator + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph +spec: + selector: + matchLabels: + app: rook-ceph-operator + replicas: 1 + template: + metadata: + labels: + app: rook-ceph-operator + spec: + serviceAccountName: rook-ceph-system + containers: + - name: rook-ceph-operator + image: rook/ceph:v1.7.2 + args: ["ceph", "operator"] + volumeMounts: + - mountPath: /var/lib/rook + name: rook-config + - mountPath: /etc/ceph + name: default-config-dir + env: + # If the operator should only watch for cluster CRDs in the same namespace, set this to "true". + # If this is not set to true, the operator will watch for cluster CRDs in all namespaces. + - name: ROOK_CURRENT_NAMESPACE_ONLY + value: "false" + # Rook Agent toleration. Will tolerate all taints with all keys. 
+ # Choose between NoSchedule, PreferNoSchedule and NoExecute: + # - name: AGENT_TOLERATION + # value: "NoSchedule" + # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate + # - name: AGENT_TOLERATION_KEY + # value: "" + # (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format. + # - name: AGENT_TOLERATIONS + # value: | + # - effect: NoSchedule + # key: node-role.kubernetes.io/controlplane + # operator: Exists + # - effect: NoExecute + # key: node-role.kubernetes.io/etcd + # operator: Exists + # (Optional) Rook Agent priority class name to set on the pod(s) + # - name: AGENT_PRIORITY_CLASS_NAME + # value: "" + # (Optional) Rook Agent NodeAffinity. + # - name: AGENT_NODE_AFFINITY + # value: "role=storage-node; storage=rook,ceph" + # (Optional) Rook Agent mount security mode. Can be `Any` or `Restricted`. + # `Any` uses Ceph admin credentials by default/fallback. + # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and + # set `mountUser` to the Ceph user, `mountSecret` to the Kubernetes secret name. + # The `mountSecret` must exist in the namespace in which the storage is consumed. + # - name: AGENT_MOUNT_SECURITY_MODE + # value: "Any" + # Set the path where the Rook agent can find the flex volumes + # - name: FLEXVOLUME_DIR_PATH + # value: "" + # Set the path where kernel modules can be found + # - name: LIB_MODULES_DIR_PATH + # value: "" + # Mount any extra directories into the agent container + # - name: AGENT_MOUNTS + # value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2" + # Rook Discover toleration. Will tolerate all taints with all keys. + # Choose between NoSchedule, PreferNoSchedule and NoExecute: + # - name: DISCOVER_TOLERATION + # value: "NoSchedule" + # (Optional) Rook Discover toleration key. 
Set this to the key of the taint you want to tolerate + # - name: DISCOVER_TOLERATION_KEY + # value: "" + # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format. + # - name: DISCOVER_TOLERATIONS + # value: | + # - effect: NoSchedule + # key: node-role.kubernetes.io/controlplane + # operator: Exists + # - effect: NoExecute + # key: node-role.kubernetes.io/etcd + # operator: Exists + # (Optional) Rook Discover priority class name to set on the pod(s) + # - name: DISCOVER_PRIORITY_CLASS_NAME + # value: "" + # (Optional) Discover Agent NodeAffinity. + # - name: DISCOVER_AGENT_NODE_AFFINITY + # value: "role=storage-node; storage=rook, ceph" + # (Optional) Discover Agent Pod Labels. + # - name: DISCOVER_AGENT_POD_LABELS + # value: "key1=value1,key2=value2" + + # The duration between discovering devices in the rook-discover daemonset. + - name: ROOK_DISCOVER_DEVICES_INTERVAL + value: "60m" + + # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods. + # Set this to true if SELinux is enabled (e.g. OpenShift) to workaround the anyuid issues. + # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641 + - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED + value: "false" + + # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins). + # Disable it here if you have similar issues. + # For more details see https://github.com/rook/rook/issues/2417 + - name: ROOK_ENABLE_SELINUX_RELABELING + value: "true" + + # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues. 
+ # For more details see https://github.com/rook/rook/issues/2254 + - name: ROOK_ENABLE_FSGROUP + value: "true" + + # Disable automatic orchestration when new devices are discovered + - name: ROOK_DISABLE_DEVICE_HOTPLUG + value: "false" + + # Provide customised regex as the values using comma. For eg. regex for rbd based volume, value will be like "(?i)rbd[0-9]+". + # In case of more than one regex, use comma to separate between them. + # Default regex will be "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" + # Add regex expression after putting a comma to blacklist a disk + # If value is empty, the default regex will be used. + - name: DISCOVER_DAEMON_UDEV_BLACKLIST + value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+" + + # Time to wait until the node controller will move Rook pods to other + # nodes after detecting an unreachable node. + # Pods affected by this setting are: + # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox + # The value used in this variable replaces the default value of 300 secs + # added automatically by k8s as Toleration for + # + # The total amount of time to reschedule Rook pods in healthy nodes + # before detecting a condition will be the sum of: + # --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag) + # --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds + - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS + value: "5" + + # The name of the node to pass with the downward API + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # The pod name to pass with the downward API + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # The pod namespace to pass with the downward API + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # Recommended resource requests and limits, if desired + #resources: + # limits: + # cpu: 500m + # memory: 256Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # Uncomment it to run lib bucket 
provisioner in multithreaded mode + #- name: LIB_BUCKET_PROVISIONER_THREADS + # value: "5" + + # Uncomment it to run rook operator on the host network + #hostNetwork: true + volumes: + - name: rook-config + emptyDir: {} + - name: default-config-dir + emptyDir: {} +# OLM: END OPERATOR DEPLOYMENT diff --git a/ii/community-infra/rook-ceph-pool-storageclass.yaml b/ii/community-infra/rook-ceph-pool-storageclass.yaml new file mode 100644 index 0000000..02e3fe2 --- /dev/null +++ b/ii/community-infra/rook-ceph-pool-storageclass.yaml @@ -0,0 +1,59 @@ +apiVersion: ceph.rook.io/v1 +kind: CephBlockPool +metadata: + name: replicapool + namespace: rook-ceph +spec: + failureDomain: host + replicated: + size: 3 +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-ceph-block + annotations: + storageclass.kubernetes.io/is-default-class: "true" +# Change "rook-ceph" provisioner prefix to match the operator namespace if needed +provisioner: rook-ceph.rbd.csi.ceph.com +parameters: + # clusterID is the namespace where the rook cluster is running + clusterID: rook-ceph + # Ceph pool into which the RBD image shall be created + pool: replicapool + + # (optional) mapOptions is a comma-separated list of map options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # mapOptions: lock_on_read,queue_depth=1024 + + # (optional) unmapOptions is a comma-separated list of unmap options. + # For krbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options + # unmapOptions: force + + # RBD image format. Defaults to "2". + imageFormat: "2" + + # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. + imageFeatures: layering + + # The secrets contain Ceph admin credentials. 
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. + csi.storage.k8s.io/fstype: ext4 + +# Delete the rbd volume when a PVC is deleted +reclaimPolicy: Delete diff --git a/ii/community-infra/rook-ceph-pvc-shared-test.yaml b/ii/community-infra/rook-ceph-pvc-shared-test.yaml new file mode 100644 index 0000000..7389f4f --- /dev/null +++ b/ii/community-infra/rook-ceph-pvc-shared-test.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rook-ceph-pvc-shared-test +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-shared diff --git a/ii/community-infra/rook-ceph-pvc-test.yaml b/ii/community-infra/rook-ceph-pvc-test.yaml new file mode 100644 index 0000000..8d0edca --- /dev/null +++ b/ii/community-infra/rook-ceph-pvc-test.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rook-ceph-pvc-test +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Gi + storageClassName: rook-ceph-block +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rook-ceph-pvc-test +spec: + strategy: + type: Recreate + selector: + matchLabels: + name: rook-ceph-pvc-test + template: + metadata: + labels: + name: rook-ceph-pvc-test + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + 
matchExpressions: + - key: name + operator: In + values: + - rook-ceph-pvc-test + topologyKey: "kubernetes.io/hostname" + containers: + - name: rook-ceph-pvc-test + image: alpine:3.12 + command: + - sleep + - infinity + volumeMounts: + - name: rook-ceph-pvc-test + mountPath: /mnt + volumes: + - name: rook-ceph-pvc-test + persistentVolumeClaim: + claimName: rook-ceph-pvc-test diff --git a/ii/community-infra/rook-ceph-toolbox.yaml b/ii/community-infra/rook-ceph-toolbox.yaml new file mode 100644 index 0000000..b1be6f1 --- /dev/null +++ b/ii/community-infra/rook-ceph-toolbox.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: rook-ceph-tools + namespace: rook-ceph + labels: + app: rook-ceph-tools +spec: + selector: + matchLabels: + app: rook-ceph-tools + template: + metadata: + labels: + app: rook-ceph-tools + spec: + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: rook-ceph-tools + image: rook/ceph:v1.7.2 + command: ["/tini"] + args: ["-g", "--", "/usr/local/bin/toolbox.sh"] + imagePullPolicy: IfNotPresent + env: + - name: ROOK_CEPH_USERNAME + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: ceph-username + - name: ROOK_CEPH_SECRET + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: ceph-secret + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /etc/ceph + name: ceph-config + - name: mon-endpoint-volume + mountPath: /etc/rook + volumes: + - name: mon-endpoint-volume + configMap: + name: rook-ceph-mon-endpoints + items: + - key: data + path: mon-endpoints + - name: ceph-config + emptyDir: {} + tolerations: + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 5 diff --git a/ii/equinix-metal-capi-talos-kubernetes/README.md b/ii/equinix-metal-capi-talos-kubernetes/README.md new file mode 100644 index 0000000..8e17c10 --- /dev/null +++ b/ii/equinix-metal-capi-talos-kubernetes/README.md @@ -0,0 +1,720 @@ + +# 
Introduction + +In this guide we will launch a highly-available three Node Kubernetes cluster on Equinix Metal using Talos as the Node OS, as well as bootstrap, and controlPlane provider for Cluster-API. + +- **What is [Cluster-API](https://cluster-api.sigs.k8s.io/)?:** + +> Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters. + +- **What is [Talos](https://www.talos.dev/)?:** + +> Talos is a modern OS designed to be secure, immutable, and minimal. + +- **What is [Equinix Metal](https://metal.equinix.com/)?:** + +> A globally-available bare metal “as-a-service” that can be deployed and interconnected in minutes. + +The folks over at Equinix Metal have a wonderful heart for supporting Open Source communities. + +- **Why is this important?:** In general: Orchestrating a container based OS such as Talos ([Flatcar](http://flatcar-linux.org/), [Fedora CoreOS](https://getfedora.org/coreos/), or [RancherOS](https://rancher.com/products/rancher/)) shifts focus from the Nodes to the workloads. In terms of Talos: Currently the documentation for running an OS such as Talos in Equinix Metal for Kubernetes with Cluster-API is not so well documented and therefore inaccessible. It's important to fill in the gaps of knowledge. + +# Dependencies + +What you'll need for this guide: + +- [talosctl](https://github.com/talos-systems/talos/releases/tag/v0.8.1) + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +- [packet-cli](https://github.com/packethost/packet-cli) + +- the ID and API token of existing Equinix Metal project + +- an existing Kubernetes cluster with a public IP (such as [kind](http://kind.sigs.k8s.io/), [minikube](https://minikube.sigs.k8s.io/), or a cluster already on Equinix Metal) + +# Prelimiary steps + +In order to talk to Equinix Metal, we'll export environment variables to configure resources and talk via `packet-cli`. 
+ +Set the correct project to create and manage resources in: + +```tmate +read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID +``` + +The API key for your account or project: + +```tmate +read -p 'PACKET_API_KEY: ' PACKET_API_KEY +``` + +Export the variables to be accessible from `packet-cli` and `clusterctl` later on: + +```tmate +export PACKET_PROJECT_ID PACKET_API_KEY PACKET_TOKEN=$PACKET_API_KEY +``` + +In the existing cluster, a public LoadBalancer IP will be needed. I have already installed nginx-ingress in this cluster, which has got a Service with the cluster's elastic IP. We'll need this IP address later for use in booting the servers. If you have set up your existing cluster differently, it'll just need to be an IP that we can use. + +```tmate +export LOAD_BALANCER_IP="$(kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')" +``` + +# Setting up Cluster-API + +Install Talos providers for Cluster-API bootstrap and controlplane in your existing cluster: + +```tmate +clusterctl init -b talos -c talos -i packet +``` + +This will install Talos's bootstrap and controlPlane controllers as well as the Packet / Equinix Metal infrastructure provider. + +****Important**** note: + +- the `bootstrap-talos` controller in the `cabpt-system` namespace must be running a version greater than `v0.2.0-alpha.8`. The version can be displayed in with `clusterctl upgrade plan` when it's installed. + +# Setting up Matchbox + +Currently, since Equinix Metal have ****not**** yet added support for Talos, it is necessary to install [Matchbox](https://matchbox.psdn.io/) to boot the servers (There is an [issue](https://github.com/packethost/packet-images/issues/26) in progress and [feedback](https://feedback.equinixmetal.com/operating-systems/p/talos-as-officially-supported-operating-system) for adding support). 
+ +- **What is Matchbox?:** + +> Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. + +Here is the manifest for a basic matchbox installation: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: matchbox +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + name: matchbox + template: + metadata: + labels: + name: matchbox + spec: + containers: + - name: matchbox + image: quay.io/poseidon/matchbox:v0.9.0 + env: + - name: MATCHBOX_ADDRESS + value: "0.0.0.0:8080" + - name: MATCHBOX_LOG_LEVEL + value: "debug" + ports: + - name: http + containerPort: 8080 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: 8080 + resources: + requests: + cpu: 30m + memory: 20Mi + limits: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: data + mountPath: /var/lib/matchbox + - name: assets + mountPath: /var/lib/matchbox/assets + volumes: + - name: data + hostPath: + path: /var/local/matchbox/data + - name: assets + hostPath: + path: /var/local/matchbox/assets +--- +apiVersion: v1 +kind: Service +metadata: + name: matchbox + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress +spec: + type: LoadBalancer + selector: + name: matchbox + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 +``` + +Save it as `matchbox.yaml` + +The manifests above were inspired by the manifests in the [matchbox repo](https://github.com/poseidon/matchbox/tree/master/contrib/k8s). 
For production it might be wise to use: + +- an Ingress with full TLS +- a ReadWriteMany storage provider instead hostPath for scaling + +With the manifests ready to go, we'll install Matchbox into the `matchbox` namespace on the existing cluster with the following commands: + +```tmate +kubectl create ns matchbox +kubectl -n matchbox apply -f ./matchbox.yaml +``` + +You may need to patch the `Service.spec.externalIPs` to have an IP to access it from if one is not populated: + +```tmate +kubectl -n matchbox patch \ + service matchbox \ + -p "{\"spec\":{\"externalIPs\":[\"$LOAD_BALANCER_IP\"]}}" +``` + +Once the pod is live, We'll need to create a directory structure for storing Talos boot assets: + +```tmate +kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + mkdir -p /var/lib/matchbox/{profiles,groups} /var/lib/matchbox/assets/talos +``` + +Inside the Matchbox container, we'll download the Talos boot assets for Talos version 0.8.1 into the assets folder: + +```tmate +kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + wget -P /var/lib/matchbox/assets/talos \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/initramfs-amd64.xz \ + https://github.com/talos-systems/talos/releases/download/v0.8.1/vmlinuz-amd64 +``` + +Now that the assets have been downloaded, run a checksum against them to verify: + +```tmate +kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c "cd /var/lib/matchbox/assets/talos && \ + wget -O- https://github.com/talos-systems/talos/releases/download/v0.8.1/sha512sum.txt 2> /dev/null \ + | sed 's,_out/,,g' \ + | grep 'initramfs-amd64.xz\|vmlinuz-amd64' \ + | sha512sum -c -" +``` + +Since there's only one Pod in the Matchbox deployment, we'll export it's name to copy files into it: + +```tmate +export MATCHBOX_POD_NAME=$(kubectl -n matchbox get pods -l name=matchbox -o=jsonpath='{.items[0].metadata.name}') +``` + +[Profiles in Matchbox](https://matchbox.psdn.io/matchbox/#profiles) are JSON configurations 
for how the servers should boot, where from, and their kernel args. Save this file as `profile-default-amd64.json` + +```json +{ + "id": "default-amd64", + "name": "default-amd64", + "boot": { + "kernel": "/assets/talos/vmlinuz-amd64", + "initrd": [ + "/assets/talos/initramfs-amd64.xz" + ], + "args": [ + "initrd=initramfs-amd64.xz", + "init_on_alloc=1", + "init_on_free=1", + "slub_debug=P", + "pti=on", + "random.trust_cpu=on", + "console=tty0", + "console=ttyS1,115200n8", + "slab_nomerge", + "printk.devkmsg=on", + "talos.platform=packet", + "talos.config=none" + ] + } +} +``` + +[Groups in Matchbox](https://matchbox.psdn.io/matchbox/#groups) are a way of letting servers pick up profiles based on selectors. Save this file as `group-default-amd64.json` + +```json +{ + "id": "default-amd64", + "name": "default-amd64", + "profile": "default-amd64", + "selector": { + "arch": "amd64" + } +} +``` + +We'll copy the profile and group into their respective folders: + +```tmate +kubectl -n matchbox \ + cp ./profile-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/profiles/default-amd64.json +kubectl -n matchbox \ + cp ./group-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/groups/default-amd64.json +``` + +List the files to validate that they were written correctly: + +```tmate +kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c 'ls -alh /var/lib/matchbox/*/' +``` + +## Testing Matchbox + +Using `curl`, we can verify Matchbox's running state: + +```tmate +curl http://$LOAD_BALANCER_IP:8080 +``` + +To test matchbox, we'll create an invalid userdata configuration for Talos, saving as `userdata.txt`: + +```text +#!talos +``` + +Feel free to use a valid one. 
+ +Now let's talk to Equinix Metal to create a server pointing to the Matchbox server: + +```tmate +packet-cli device create \ + --hostname talos-pxe-boot-test-1 \ + --plan c1.small.x86 \ + --facility sjc1 \ + --operating-system custom_ipxe \ + --project-id "$PACKET_PROJECT_ID" \ + --ipxe-script-url "http://$LOAD_BALANCER_IP:8080/ipxe?arch=amd64" \ + --userdata-file=./userdata.txt +``` + +In the meanwhile, we can watch the logs to see how things are: + +```tmate +kubectl -n matchbox logs deployment/matchbox -f --tail=100 +``` + +Looking at the logs, there should be some get requests of resources that will be used to boot the OS. + +Notes: + +- fun fact: you can run Matchbox on Android using [Termux](https://f-droid.org/en/packages/com.termux/). + +# The cluster + +## Preparing the cluster + +Here we will declare the template that we will shortly generate our usable cluster from: + +```yaml +kind: TalosControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - https://github.com/packethost/packet-ccm/releases/download/v1.1.0/deployment.yaml + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: 
controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.8.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + projectID: "${PACKET_PROJECT_ID}" + facility: "${FACILITY}" +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a +spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + 
pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a +spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} +spec: + template: + spec: + generateType: init +``` + +Inside of `TalosControlPlane.spec.controlPlaneConfig.init`, I'm very much liking the use of `generateType: init` paired with `configPatches`. This enables: + +- configuration to be generated; +- management of certificates out of the cluster operator's hands; +- another level of standardisation; and +- overrides to be added where needed + +Notes: + +- the ClusterAPI template above uses Packet-Cloud-Controller manager version 1.1.0 + +### Templating your configuration + +Set environment variables for configuration: + +```bash +export CLUSTER_NAME="talos-metal" +export FACILITY=sjc1 +export KUBERNETES_VERSION=v1.20.2 +export POD_CIDR=10.244.0.0/16 +export SERVICE_CIDR=10.96.0.0/12 +export CONTROLPLANE_NODE_TYPE=c1.small.x86 +export CONTROL_PLANE_MACHINE_COUNT=3 +export WORKER_NODE_TYPE=c1.small.x86 +export WORKER_MACHINE_COUNT=0 +export SSH_KEY="" +export IPXE_URL=$LOAD_BALANCER_IP +``` + +In the variables above, we will create a cluster which has three small controlPlane nodes to run workloads. 
+ +### Render the manifests + +Render your cluster configuration from the template: + +```tmate +clusterctl config cluster "$CLUSTER_NAME" \ + --from ./talos-packet-cluster-template.yaml \ + -n "$CLUSTER_NAME" > "$CLUSTER_NAME"-cluster-capi.yaml +``` + +## Creating the cluster + +With the template for the cluster rendered to how wish to deploy it, it's now time to apply it: + +```tmate +kubectl create ns "$CLUSTER_NAME" +kubectl -n "$CLUSTER_NAME" apply -f ./"$CLUSTER_NAME"-cluster-capi.yaml +``` + +The cluster will now be brought up, we can see the progress by taking a look at the resources: + +```tmate +kubectl -n "$CLUSTER_NAME" get machines,clusters,packetmachines,packetclusters +``` + +Note: As expected, the cluster may take some time to appear and be accessible. + +Not long after applying, a KubeConfig is available. Fetch the KubeConfig from the existing cluster with: + +```tmate +kubectl -n "$CLUSTER_NAME" get secrets \ + "$CLUSTER_NAME"-kubeconfig -o=jsonpath='{.data.value}' \ + | base64 -d > $HOME/.kube/"$CLUSTER_NAME" +``` + +Using the KubeConfig from the new cluster, check out the status of it: + +```tmate +kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" cluster-info +``` + +Once the APIServer is reachable, create configuration for how the Packet-Cloud-Controller-Manager should talk to Equinix-Metal: + +```tmate +kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" -n kube-system \ + create secret generic packet-cloud-config \ + --from-literal=cloud-sa.json="{\"apiKey\": \"${PACKET_API_KEY}\",\"projectID\": \"${PACKET_PROJECT_ID}\"}" +``` + +Since we're able to talk to the APIServer, we can check how all Pods are doing: + +```bash +export CLUSTER_NAME="talos-metal" +kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME"\ + -n kube-system get pods +``` + +Listing Pods shows that everything is live and in a good state: + +```bash +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-5b55f9f688-fb2cb 1/1 Running 0 25m +kube-system coredns-5b55f9f688-qsvg5 
1/1 Running 0 25m +kube-system kube-apiserver-665px 1/1 Running 0 19m +kube-system kube-apiserver-mz68q 1/1 Running 0 19m +kube-system kube-apiserver-qfklt 1/1 Running 2 19m +kube-system kube-controller-manager-6grxd 1/1 Running 0 19m +kube-system kube-controller-manager-cf76h 1/1 Running 0 19m +kube-system kube-controller-manager-dsmgf 1/1 Running 0 19m +kube-system kube-flannel-brdxw 1/1 Running 0 24m +kube-system kube-flannel-dm85d 1/1 Running 0 24m +kube-system kube-flannel-sg6k9 1/1 Running 0 24m +kube-system kube-proxy-flx59 1/1 Running 0 24m +kube-system kube-proxy-gbn4l 1/1 Running 0 24m +kube-system kube-proxy-ns84v 1/1 Running 0 24m +kube-system kube-scheduler-4qhjw 1/1 Running 0 19m +kube-system kube-scheduler-kbm5z 1/1 Running 0 19m +kube-system kube-scheduler-klsmp 1/1 Running 0 19m +kube-system packet-cloud-controller-manager-77cd8c9c7c-cdzfv 1/1 Running 0 20m +kube-system pod-checkpointer-4szh6 1/1 Running 0 19m +kube-system pod-checkpointer-4szh6-talos-metal-control-plane-j29lb 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h-talos-metal-control-plane-lk9f2 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh-talos-metal-control-plane-h9v4j 1/1 Running 0 19m +``` + +With the cluster live, it's now ready for workloads to be deployed! + +# Talos Configuration + +In order to manage Talos Nodes outside of Kubernetes, we need to create and set up configuration to use. 
+ +Create the directory for the config: + +```tmate +mkdir -p $HOME/.talos +``` + +Discover the IP for the first controlPlane: + +```tmate +export TALOS_ENDPOINT=$(kubectl -n "$CLUSTER_NAME" \ + get machines \ + $(kubectl -n "$CLUSTER_NAME" \ + get machines -l cluster.x-k8s.io/control-plane='' \ + --no-headers --output=jsonpath='{.items[0].metadata.name}') \ + -o=jsonpath="{.status.addresses[?(@.type=='ExternalIP')].address}" | awk '{print $2}') +``` + +Fetch the `talosconfig` from the existing cluster: + +```tmate +kubectl get talosconfig \ + -n $CLUSTER_NAME \ + -l cluster.x-k8s.io/cluster-name=$CLUSTER_NAME \ + -o yaml -o jsonpath='{.items[0].status.talosConfig}' > $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml +``` + +Write in the configuration the endpoint IP and node IP: + +```tmate +talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config endpoint $TALOS_ENDPOINT +talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config node $TALOS_ENDPOINT +``` + +Now that the `talosconfig` has been written, try listing all containers: + +```bash +export CLUSTER_NAME="talos-metal" +# removing ip; omit ` | sed ...` for regular use +talosctl --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml containers | sed -r 's/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b'/"x.x.x.x "/ +``` + +Here's the containers running on this particular node, in containerd (not k8s related): + +```bash +NODE NAMESPACE ID IMAGE PID STATUS +x.x.x.x system apid talos/apid 3046 RUNNING +x.x.x.x system etcd gcr.io/etcd-development/etcd:v3.4.14 3130 RUNNING +x.x.x.x system networkd talos/networkd 2879 RUNNING +x.x.x.x system routerd talos/routerd 2888 RUNNING +x.x.x.x system timed talos/timed 2976 RUNNING +x.x.x.x system trustd talos/trustd 3047 RUNNING +``` + +# Clean up + +Tearing down the entire cluster and resources associated with it, can be achieved by + +i. 
Deleting the cluster: + +```tmate +kubectl -n "$CLUSTER_NAME" delete cluster "$CLUSTER_NAME" +``` + +ii. Deleting the namespace: + +```tmate +kubectl delete ns "$CLUSTER_NAME" +``` + +iii. Removing local configurations: + +```tmate +rm \ + $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + $HOME/.kube/"$CLUSTER_NAME" +``` + +# What have I learned from this? + +- **(always learning) how wonderful the Kubernetes community is:** there are so many knowledgable individuals who are so ready for collaboration and adoption - it doesn't matter the SIG or group. +- **how modular Cluster-API is:** Cluster-API components (bootstrap, controlPlane, core, infrastructure) can be swapped out and meshed together in very cool ways. + +# Credits + +Integrating Talos into this project would not be possible without help from [Andrew Rynhard (Talos Systems)](https://github.com/andrewrynhard), huge thanks to him for reaching out for pairing and co-authoring. + +# Notes and references + +- with the new cluster's controlPlane live and available for deployment, the iPXE server could be moved into that cluster - meaning that new servers boot from the cluster that they'll join, making it almost self-contained +- cluster configuration as based off of [cluster-template.yaml from the cluster-api-provider-packet repo](https://github.com/kubernetes-sigs/cluster-api-provider-packet/blob/479faf06e1337b1e979cb624ca8be015b2a89cde/templates/cluster-template.yaml) +- this post has been made to [blog.calebwoodine.com](https://blog.calebwoodbine.com/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal), and [talos-system.com/blog](https://ii.coop/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal/), but is also available as an [Org file](https://github.com/ii/org/blob/master/ii/equinix-metal-capi-talos-kubernetes/README.org) + +--- + +Hope you've enjoyed the output of this project! Thank you! 
diff --git a/ii/equinix-metal-capi-talos-kubernetes/README.org b/ii/equinix-metal-capi-talos-kubernetes/README.org new file mode 100644 index 0000000..98c31e0 --- /dev/null +++ b/ii/equinix-metal-capi-talos-kubernetes/README.org @@ -0,0 +1,734 @@ +#+TITLE: Cluster-API + Talos + Equinix Metal +#+AUTHOR: Caleb Woodbine +#+AUTHOR: Andrew Rynhard +#+DATE: 21st of Janurary 2020 +#+PROPERTY: header-args:tmate+ :dir . :window capi-talos-metal + +* Introduction + +In this guide we will launch a highly-available three Node Kubernetes cluster on Equinix Metal using Talos as the Node OS, as well as bootstrap, and controlPlane provider for Cluster-API. + +- What is [[https://cluster-api.sigs.k8s.io/][Cluster-API]]? :: +#+begin_quote +Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters. +#+end_quote + +- What is [[https://www.talos.dev/][Talos]]? :: +#+begin_quote +Talos is a modern OS designed to be secure, immutable, and minimal. +#+end_quote + +- What is [[https://metal.equinix.com/][Equinix Metal]]? :: +#+begin_quote +A globally-available bare metal “as-a-service” that can be deployed and interconnected in minutes. +#+end_quote +The folks over at Equinix Metal have a wonderful heart for supporting Open Source communities. + +- Why is this important? :: In general: Orchestrating a container based OS such as Talos ([[http://flatcar-linux.org/][Flatcar]], [[https://getfedora.org/coreos/][Fedora CoreOS]], or [[https://rancher.com/products/rancher/][RancherOS]]) shifts focus from the Nodes to the workloads. In terms of Talos: Currently the documentation for running an OS such as Talos in Equinix Metal for Kubernetes with Cluster-API is not so well documented and therefore inaccessible. It's important to fill in the gaps of knowledge. 
+ +* Dependencies + +What you'll need for this guide: + +- [[https://github.com/talos-systems/talos/releases/tag/v0.12.1][talosctl]] + +- [[https://kubernetes.io/docs/tasks/tools/install-kubectl/][kubectl]] + +- [[https://github.com/packethost/packet-cli][packet-cli]] + +- the ID and API token of an existing Equinix Metal project + +- an existing Kubernetes cluster with a public IP (such as [[http://kind.sigs.k8s.io/][kind]], [[https://minikube.sigs.k8s.io/][minikube]], or a cluster already on Equinix Metal) + +* Preliminary steps + +In order to talk to Equinix Metal, we'll export environment variables to configure resources and talk via ~packet-cli~. + +Set the correct project to create and manage resources in: +#+begin_src tmate +read -p 'PACKET_PROJECT_ID: ' PACKET_PROJECT_ID +#+end_src + +The API key for your account or project: +#+begin_src tmate +read -p 'PACKET_API_KEY: ' PACKET_API_KEY +#+end_src + +Export the variables to be accessible from ~packet-cli~ and ~clusterctl~ later on: +#+begin_src tmate +export PACKET_PROJECT_ID PACKET_API_KEY PACKET_TOKEN=$PACKET_API_KEY +#+end_src + +In the existing cluster, a public LoadBalancer IP will be needed. I have already installed nginx-ingress in this cluster, which has got a Service with the cluster's elastic IP. +We'll need this IP address later for use in booting the servers. +If you have set up your existing cluster differently, it'll just need to be an IP that we can use. +#+begin_src tmate +export LOAD_BALANCER_IP="$(kubectl -n nginx-ingress get svc nginx-ingress-ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')" +#+end_src + +* Setting up Cluster-API + +Install Talos providers for Cluster-API bootstrap and controlplane in your existing cluster: +#+begin_src tmate +clusterctl init -b talos -c talos -i packet +#+end_src + +This will install Talos's bootstrap and controlPlane controllers as well as the Packet / Equinix Metal infrastructure provider. 
+ +**Important** note: +- the ~bootstrap-talos~ controller in the ~cabpt-system~ namespace must be running a version greater than ~v0.2.0-alpha.8~. The version can be displayed in with ~clusterctl upgrade plan~ when it's installed. + +* Setting up Matchbox + +Currently, since Equinix Metal have **not** yet added support for Talos, it is necessary to install [[https://matchbox.psdn.io/][Matchbox]] to boot the servers (There is an [[https://github.com/packethost/packet-images/issues/26][issue]] in progress and [[https://feedback.equinixmetal.com/operating-systems/p/talos-as-officially-supported-operating-system][feedback]] for adding support). + +- What is Matchbox? :: +#+begin_quote +Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. +#+end_quote + +Here is the manifest for a basic matchbox installation: +#+begin_src yaml :tangle ./matchbox.yaml :comments none +apiVersion: apps/v1 +kind: Deployment +metadata: + name: matchbox +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + name: matchbox + template: + metadata: + labels: + name: matchbox + spec: + containers: + - name: matchbox + image: quay.io/poseidon/matchbox:v0.9.0 + env: + - name: MATCHBOX_ADDRESS + value: "0.0.0.0:8080" + - name: MATCHBOX_LOG_LEVEL + value: "debug" + ports: + - name: http + containerPort: 8080 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: 8080 + resources: + requests: + cpu: 30m + memory: 20Mi + limits: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: data + mountPath: /var/lib/matchbox + - name: assets + mountPath: /var/lib/matchbox/assets + volumes: + - name: data + hostPath: + path: /var/local/matchbox/data + - name: assets + hostPath: + path: /var/local/matchbox/assets +--- +apiVersion: v1 +kind: Service +metadata: + name: matchbox + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress +spec: + type: LoadBalancer + selector: + name: matchbox + 
ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 +#+end_src +Save it as ~matchbox.yaml~ + +The manifests above were inspired by the manifests in the [[https://github.com/poseidon/matchbox/tree/master/contrib/k8s][matchbox repo]]. +For production it might be wise to use: +- an Ingress with full TLS +- a ReadWriteMany storage provider instead hostPath for scaling + +With the manifests ready to go, we'll install Matchbox into the ~matchbox~ namespace on the existing cluster with the following commands: +#+begin_src tmate + kubectl create ns matchbox + kubectl -n matchbox apply -f ./matchbox.yaml +#+end_src + +You may need to patch the ~Service.spec.externalIPs~ to have an IP to access it from if one is not populated: +#+begin_src tmate +kubectl -n matchbox patch \ + service matchbox \ + -p "{\"spec\":{\"externalIPs\":[\"$LOAD_BALANCER_IP\"]}}" +#+end_src + +Once the pod is live, We'll need to create a directory structure for storing Talos boot assets: +#+begin_src tmate +kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + mkdir -p /var/lib/matchbox/{profiles,groups} /var/lib/matchbox/assets/talos +#+end_src + +Inside the Matchbox container, we'll download the Talos boot assets for Talos version 0.12.1 into the assets folder: +#+begin_src tmate +kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + wget -P /var/lib/matchbox/assets/talos \ + https://github.com/talos-systems/talos/releases/download/v0.12.1/initramfs-amd64.xz \ + https://github.com/talos-systems/talos/releases/download/v0.12.1/vmlinuz-amd64 +#+end_src + +Now that the assets have been downloaded, run a checksum against them to verify: +#+begin_src tmate +kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c "cd /var/lib/matchbox/assets/talos && \ + wget -O- https://github.com/talos-systems/talos/releases/download/v0.12.1/sha512sum.txt 2> /dev/null \ + | sed 's,_out/,,g' \ + | grep 'initramfs-amd64.xz\|vmlinuz-amd64' \ + | sha512sum -c -" +#+end_src + +Since 
there's only one Pod in the Matchbox deployment, we'll export it's name to copy files into it: +#+begin_src tmate +export MATCHBOX_POD_NAME=$(kubectl -n matchbox get pods -l name=matchbox -o=jsonpath='{.items[0].metadata.name}') +#+end_src + +[[https://matchbox.psdn.io/matchbox/#profiles][Profiles in Matchbox]] are JSON configurations for how the servers should boot, where from, and their kernel args. Save this file as ~profile-default-amd64.json~ +#+begin_src json :tangle ./profile-default-amd64.json :comments none +{ + "id": "default-amd64", + "name": "default-amd64", + "boot": { + "kernel": "/assets/talos/vmlinuz-amd64", + "initrd": [ + "/assets/talos/initramfs-amd64.xz" + ], + "args": [ + "initrd=initramfs-amd64.xz", + "init_on_alloc=1", + "init_on_free=1", + "slub_debug=P", + "pti=on", + "random.trust_cpu=on", + "console=tty0", + "console=ttyS1,115200n8", + "slab_nomerge", + "printk.devkmsg=on", + "talos.platform=packet", + "talos.config=none" + ] + } +} +#+end_src + +[[https://matchbox.psdn.io/matchbox/#groups][Groups in Matchbox]] are a way of letting servers pick up profiles based on selectors. 
Save this file as ~group-default-amd64.json~ +#+begin_src json :tangle ./group-default-amd64.json :comments none +{ + "id": "default-amd64", + "name": "default-amd64", + "profile": "default-amd64", + "selector": { + "arch": "amd64" + } +} +#+end_src + +We'll copy the profile and group into their respective folders: +#+begin_src tmate +kubectl -n matchbox \ + cp ./profile-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/profiles/default-amd64.json +kubectl -n matchbox \ + cp ./group-default-amd64.json \ + $MATCHBOX_POD_NAME:/var/lib/matchbox/groups/default-amd64.json +#+end_src + +List the files to validate that they were written correctly: +#+begin_src tmate +kubectl -n matchbox exec -it \ + deployment/matchbox -- \ + sh -c 'ls -alh /var/lib/matchbox/*/' +#+end_src + +** Testing Matchbox + +Using ~curl~, we can verify Matchbox's running state: +#+begin_src tmate +curl http://$LOAD_BALANCER_IP:8080 +#+end_src + +To test matchbox, we'll create an invalid userdata configuration for Talos, saving as ~userdata.txt~: +#+begin_src text :tangle ./userdata.txt :comments none +#!talos +#+end_src +Feel free to use a valid one. + +Now let's talk to Equinix Metal to create a server pointing to the Matchbox server: +#+begin_src tmate +packet-cli device create \ + --hostname talos-pxe-boot-test-1 \ + --plan c1.small.x86 \ + --facility sjc1 \ + --operating-system custom_ipxe \ + --project-id "$PACKET_PROJECT_ID" \ + --ipxe-script-url "http://$LOAD_BALANCER_IP:8080/ipxe?arch=amd64" \ + --userdata-file=./userdata.txt +#+end_src + +In the meanwhile, we can watch the logs to see how things are: +#+begin_src tmate +kubectl -n matchbox logs deployment/matchbox -f --tail=100 +#+end_src + +Looking at the logs, there should be some get requests of resources that will be used to boot the OS. + +Notes: +- fun fact: you can run Matchbox on Android using [[https://f-droid.org/en/packages/com.termux/][Termux]]. 
+ +* The cluster + +** Preparing the cluster + +Here we will declare the template that we will shortly generate our usable cluster from: +#+begin_src yaml :tangle ./talos-packet-cluster-template.yaml :comments none +kind: TalosControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.12.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - https://github.com/equinix/cloud-provider-equinix-metal/releases/download/v3.2.2/deployment.yaml + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.12.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + 
template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + projectID: "${PACKET_PROJECT_ID}" + facility: "${FACILITY}" +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a +spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a +spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - 
"${SSH_KEY}" + tags: [] +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} +spec: + template: + spec: + generateType: init +#+end_src + +Inside of ~TalosControlPlane.spec.controlPlaneConfig.init~, I'm very much liking the use of ~generateType: init~ paired with ~configPatches~. This enables: +- configuration to be generated; +- management of certificates out of the cluster operator's hands; +- another level of standardisation; and +- overrides to be added where needed + +Notes: +- the ClusterAPI template above uses Packet-Cloud-Controller manager version 3.2.2 + +*** Cluster name :noexport: +#+name: cluster-config-env-name +#+begin_src bash +export CLUSTER_NAME="ii-nz" +#+end_src + +*** Templating your configuration + +Set environment variables for configuration: +#+name: cluster-config-env +#+begin_src bash :noweb yes +<<cluster-config-env-name>> +export FACILITY=sjc1 +export KUBERNETES_VERSION=v1.21.1 +export POD_CIDR=10.244.0.0/16 +export SERVICE_CIDR=10.96.0.0/12 +export CONTROLPLANE_NODE_TYPE=c3.small.x86 +export CONTROL_PLANE_MACHINE_COUNT=3 +export WORKER_NODE_TYPE=c3.small.x86 +export WORKER_MACHINE_COUNT=0 +export SSH_KEY="" +export IPXE_SERVER_IP=$LOAD_BALANCER_IP +#+end_src + +In the variables above, we will create a cluster which has three small controlPlane nodes to run workloads. 
+ +**** Apply the variables :noexport: + +Set the env in the tmate session: +#+begin_src tmate :noweb yes +<<cluster-config-env>> +#+end_src + +*** Render the manifests +Render your cluster configuration from the template: +#+begin_src tmate :noweb yes +clusterctl config cluster "$CLUSTER_NAME" \ + --from ./talos-packet-cluster-template.yaml \ + -n "$CLUSTER_NAME" > "$CLUSTER_NAME"-cluster-capi.yaml +#+end_src + +** Creating the cluster + +With the template for the cluster rendered to how we wish to deploy it, it's now time to apply it: +#+begin_src tmate +kubectl create ns "$CLUSTER_NAME" +kubectl -n "$CLUSTER_NAME" apply -f ./"$CLUSTER_NAME"-cluster-capi.yaml +#+end_src + +The cluster will now be brought up, we can see the progress by taking a look at the resources: +#+begin_src tmate +kubectl -n "$CLUSTER_NAME" get machines,clusters,packetmachines,packetclusters +#+end_src + +Note: As expected, the cluster may take some time to appear and be accessible. + +Not long after applying, a KubeConfig is available. 
Fetch the KubeConfig from the existing cluster with: +#+begin_src tmate +kubectl -n "$CLUSTER_NAME" get secrets \ + "$CLUSTER_NAME"-kubeconfig -o=jsonpath='{.data.value}' \ + | base64 -d > $HOME/.kube/"$CLUSTER_NAME" +#+end_src + +Using the KubeConfig from the new cluster, check out the status of it: +#+begin_src tmate +kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" cluster-info +#+end_src + +Once the APIServer is reachable, create configuration for how the Packet-Cloud-Controller-Manager should talk to Equinix-Metal: +#+begin_src tmate +kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME" -n kube-system \ + create secret generic packet-cloud-config \ + --from-literal=cloud-sa.json="{\"apiKey\": \"${PACKET_API_KEY}\",\"projectID\": \"${PACKET_PROJECT_ID}\"}" +#+end_src + +Since we're able to talk to the APIServer, we can check how all Pods are doing: +#+name: list all Pods +#+begin_src bash :noweb yes +<> +kubectl --kubeconfig $HOME/.kube/"$CLUSTER_NAME"\ + -n kube-system get pods +#+end_src + +Listing Pods shows that everything is live and in a good state: +#+RESULTS: list all Pods +#+begin_src bash +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-5b55f9f688-fb2cb 1/1 Running 0 25m +kube-system coredns-5b55f9f688-qsvg5 1/1 Running 0 25m +kube-system kube-apiserver-665px 1/1 Running 0 19m +kube-system kube-apiserver-mz68q 1/1 Running 0 19m +kube-system kube-apiserver-qfklt 1/1 Running 2 19m +kube-system kube-controller-manager-6grxd 1/1 Running 0 19m +kube-system kube-controller-manager-cf76h 1/1 Running 0 19m +kube-system kube-controller-manager-dsmgf 1/1 Running 0 19m +kube-system kube-flannel-brdxw 1/1 Running 0 24m +kube-system kube-flannel-dm85d 1/1 Running 0 24m +kube-system kube-flannel-sg6k9 1/1 Running 0 24m +kube-system kube-proxy-flx59 1/1 Running 0 24m +kube-system kube-proxy-gbn4l 1/1 Running 0 24m +kube-system kube-proxy-ns84v 1/1 Running 0 24m +kube-system kube-scheduler-4qhjw 1/1 Running 0 19m +kube-system kube-scheduler-kbm5z 1/1 
Running 0 19m +kube-system kube-scheduler-klsmp 1/1 Running 0 19m +kube-system packet-cloud-controller-manager-77cd8c9c7c-cdzfv 1/1 Running 0 20m +kube-system pod-checkpointer-4szh6 1/1 Running 0 19m +kube-system pod-checkpointer-4szh6-talos-metal-control-plane-j29lb 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h 1/1 Running 0 19m +kube-system pod-checkpointer-k7w8h-talos-metal-control-plane-lk9f2 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh 1/1 Running 0 19m +kube-system pod-checkpointer-m5wrh-talos-metal-control-plane-h9v4j 1/1 Running 0 19m +#+end_src + +With the cluster live, it's now ready for workloads to be deployed! + +* Talos Configuration + +In order to manage Talos Nodes outside of Kubernetes, we need to create and set up configuration to use. + +Create the directory for the config: +#+begin_src tmate +mkdir -p $HOME/.talos +#+end_src + +Discover the IP for the first controlPlane: +#+begin_src tmate +export TALOS_ENDPOINT=$(kubectl -n "$CLUSTER_NAME" \ + get machines \ + $(kubectl -n "$CLUSTER_NAME" \ + get machines -l cluster.x-k8s.io/control-plane='' \ + --no-headers --output=jsonpath='{.items[0].metadata.name}') \ + -o=jsonpath="{.status.addresses[?(@.type=='ExternalIP')].address}" | awk '{print $2}') +#+end_src + +Fetch the ~talosconfig~ from the existing cluster: +#+begin_src tmate +kubectl get talosconfig \ + -n $CLUSTER_NAME \ + -l cluster.x-k8s.io/cluster-name=$CLUSTER_NAME \ + -o yaml -o jsonpath='{.items[0].status.talosConfig}' > $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml +#+end_src + +Write in the configuration the endpoint IP and node IP: +#+begin_src tmate +talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config endpoint $TALOS_ENDPOINT +talosctl \ + --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + config node $TALOS_ENDPOINT +#+end_src + +Now that the ~talosconfig~ has been written, try listing all containers: +#+name: 
list-containers-on-containerd +#+begin_src bash :noweb yes +<> +# removing ip; omit ` | sed ...` for regular use +talosctl --talosconfig $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml containers | sed -r 's/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b'/"x.x.x.x "/ +#+end_src + +Here's the containers running on this particular node, in containerd (not k8s related): +#+RESULTS: list-containers-on-containerd +#+begin_src bash +NODE NAMESPACE ID IMAGE PID STATUS +x.x.x.x system apid talos/apid 3046 RUNNING +x.x.x.x system etcd gcr.io/etcd-development/etcd:v3.4.14 3130 RUNNING +x.x.x.x system networkd talos/networkd 2879 RUNNING +x.x.x.x system routerd talos/routerd 2888 RUNNING +x.x.x.x system timed talos/timed 2976 RUNNING +x.x.x.x system trustd talos/trustd 3047 RUNNING +#+end_src + +* Clean up + +Tearing down the entire cluster and resources associated with it, can be achieved by + +i. Deleting the cluster: +#+begin_src tmate +kubectl -n "$CLUSTER_NAME" delete cluster "$CLUSTER_NAME" +#+end_src + +ii. Deleting the namespace: +#+begin_src tmate +kubectl delete ns "$CLUSTER_NAME" +#+end_src + +iii. Removing local configurations: +#+begin_src tmate +rm \ + $HOME/.talos/"$CLUSTER_NAME"-management-plane-talosconfig.yaml \ + $HOME/.kube/"$CLUSTER_NAME" +#+end_src + +* What have I learned from this? +- (always learning) how wonderful the Kubernetes community is :: there are so many knowledgable individuals who are so ready for collaboration and adoption - it doesn't matter the SIG or group. +- how modular Cluster-API is :: Cluster-API components (bootstrap, controlPlane, core, infrastructure) can be swapped out and meshed together in very cool ways. + +* Credits +Integrating Talos into this project would not be possible without help from [[https://github.com/andrewrynhard][Andrew Rynhard (Talos Systems)]], huge thanks to him for reaching out for pairing and co-authoring. 
+ +* Notes and references +- with the new cluster's controlPlane live and available for deployment, the iPXE server could be moved into that cluster - meaning that new servers boot from the cluster that they'll join, making it almost self-contained +- cluster configuration is based off of [[https://github.com/kubernetes-sigs/cluster-api-provider-packet/blob/479faf06e1337b1e979cb624ca8be015b2a89cde/templates/cluster-template.yaml][cluster-template.yaml from the cluster-api-provider-packet repo]] +- this post has been made to [[https://blog.calebwoodbine.com/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal][blog.calebwoodbine.com]], and [[https://ii.coop/deploying-talos-and-kubernetes-with-cluster-api-on-equinix-metal/][talos-system.com/blog]], but is also available as an [[https://github.com/ii/org/blob/master/ii/equinix-metal-capi-talos-kubernetes/README.org][Org file]] + +----- + +Hope you've enjoyed the output of this project! +Thank you! + +* Footnotes + +#+REVEAL_ROOT: https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.9.2 +#+NOREVEAL_ROOT: https://raw.githubusercontent.com/hakimel/reveal.js/3.9.2/ +# #+REVEAL_TITLE_SLIDE: +#+NOREVEAL_DEFAULT_FRAG_STYLE: YY +#+NOREVEAL_EXTRA_CSS: YY +#+NOREVEAL_EXTRA_JS: YY +#+REVEAL_HLEVEL: 2 +#+REVEAL_MARGIN: 0.1 +#+REVEAL_WIDTH: 1000 +#+REVEAL_HEIGHT: 600 +#+REVEAL_MAX_SCALE: 3.5 +#+REVEAL_MIN_SCALE: 0.2 +#+REVEAL_PLUGINS: (markdown notes highlight multiplex) +#+REVEAL_SLIDE_NUMBER: "" +#+REVEAL_SPEED: 1 +#+REVEAL_THEME: moon +#+REVEAL_THEME_OPTIONS: beige|black|blood|league|moon|night|serif|simple|sky|solarized|white +#+REVEAL_TRANS: cube +#+REVEAL_TRANS_OPTIONS: none|cube|fade|concave|convex|page|slide|zoom + +#+OPTIONS: num:nil +#+OPTIONS: toc:nil +#+OPTIONS: mathjax:Y +#+OPTIONS: reveal_single_file:nil +#+OPTIONS: reveal_control:t +#+OPTIONS: reveal-progress:t +#+OPTIONS: reveal_history:nil +#+OPTIONS: reveal_center:t +#+OPTIONS: reveal_rolling_links:nil +#+OPTIONS: reveal_keyboard:t +#+OPTIONS: 
reveal_overview:t diff --git a/ii/equinix-metal-capi-talos-kubernetes/build-talos-ipxe-assets.org b/ii/equinix-metal-capi-talos-kubernetes/build-talos-ipxe-assets.org new file mode 100644 index 0000000..ea77329 --- /dev/null +++ b/ii/equinix-metal-capi-talos-kubernetes/build-talos-ipxe-assets.org @@ -0,0 +1,73 @@ +#+TITLE: Build Talos iPXE assets + +#+NAME: get-talos-version +#+begin_src elisp +(print "v0.8.1") +#+end_src + +* Download tools + +#+begin_src tmate :dir . :window capi-talos-metal + wget -O $HOME/bin/packet-save2image https://raw.githubusercontent.com/packethost/packet-images/master/tools/packet-save2image + chmod +x $HOME/bin/packet-save2image +#+end_src + +* Prepare custom image + +#+begin_src tmate :dir . :window capi-talos-metal :noweb yes + tmp=$(mktemp -d -t initrd4me-XXXXXXX) + mkdir -p $tmp/boot + OUTDIR=$PWD/metal + mkdir -p $OUTDIR +#+end_src + +#+begin_src tmate :dir . :window capi-talos-metal :noweb yes + curl -O -L https://github.com/talos-systems/talos/releases/download/<>/initramfs-amd64.xz + curl -O -L https://github.com/talos-systems/talos/releases/download/<>/vmlinuz-amd64 +#+end_src + +#+begin_src tmate :dir . :window capi-talos-metal :noweb yes + docker pull ghcr.io/talos-systems/installer:<> + docker save ghcr.io/talos-systems/installer:<> > talos-installer-<>.tar +#+end_src + +#+begin_src tmate :dir . :window capi-talos-metal :noweb yes + IMAGE=talos-installer-<>.tar + OUTDIR=metal + + IMAGETMP=metal-tmp + mkdir -p $IMAGETMP + tar -xvf $IMAGE -C $IMAGETMP + + tmp=$(mktemp -d -t initrd4me-XXXXXXX) + mkdir $tmp/boot + mkdir -p $OUTDIR + echo "Working directory is: $PWD" + echo "Temp directory is: $tmp" + echo "Archive dir is: $OUTDIR" + + echo "Preparing kernel..." + xz -d initramfs-amd64.xz + + echo "Archiving kernel..." + mv vmlinuz-amd64 $tmp/boot/vmlinuz + tar -czf $OUTDIR/kernel.tar.gz -C $tmp/boot ./vmlinuz --totals --checkpoint=.1000 + + echo "Archiving initrd..." 
+ cp $INITRD $tmp/boot + mv initramfs-amd64 $tmp/boot/initrd + tar -czf $OUTDIR/initrd.tar.gz -C $tmp/boot ./initrd --totals --checkpoint=.1000 + + packet-save2image -v < talos-installer-<>.tar > $OUTDIR/image.tar.gz + #rm -rf $IMAGETMP +#+end_src + +#+begin_src tmate :dir . :window capi-talos-metal :noweb yes + packet-save2image -v < talos-installer-<>.tar > packet-talos-installer-<>.tar.gz +#+end_src + +* Notes and references + +- https://github.com/talos-systems/cluster-api-control-plane-provider-talos +- https://github.com/talos-systems/cluster-api-bootstrap-provider-talos/tree/fe0614e8f25f061cc7df641a9f627fc507e6f582/config/samples/cluster-deployment/gcp +- https://github.com/talos-systems/cluster-api-bootstrap-provider-talos diff --git a/ii/equinix-metal-capi-talos-kubernetes/group-default-amd64.json b/ii/equinix-metal-capi-talos-kubernetes/group-default-amd64.json new file mode 100644 index 0000000..b74be2d --- /dev/null +++ b/ii/equinix-metal-capi-talos-kubernetes/group-default-amd64.json @@ -0,0 +1,8 @@ +{ + "id": "default-amd64", + "name": "default-amd64", + "profile": "default-amd64", + "selector": { + "arch": "amd64" + } +} diff --git a/ii/equinix-metal-capi-talos-kubernetes/matchbox.yaml b/ii/equinix-metal-capi-talos-kubernetes/matchbox.yaml new file mode 100644 index 0000000..7af0178 --- /dev/null +++ b/ii/equinix-metal-capi-talos-kubernetes/matchbox.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: matchbox +spec: + replicas: 1 + strategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + name: matchbox + template: + metadata: + labels: + name: matchbox + spec: + containers: + - name: matchbox + image: quay.io/poseidon/matchbox:v0.9.0 + env: + - name: MATCHBOX_ADDRESS + value: "0.0.0.0:8080" + - name: MATCHBOX_LOG_LEVEL + value: "debug" + ports: + - name: http + containerPort: 8080 + livenessProbe: + initialDelaySeconds: 5 + httpGet: + path: / + port: 8080 + resources: + requests: + cpu: 30m + memory: 
20Mi + limits: + cpu: 50m + memory: 50Mi + volumeMounts: + - name: data + mountPath: /var/lib/matchbox + - name: assets + mountPath: /var/lib/matchbox/assets + volumes: + - name: data + hostPath: + path: /var/local/matchbox/data + - name: assets + hostPath: + path: /var/local/matchbox/assets +--- +apiVersion: v1 +kind: Service +metadata: + name: matchbox + annotations: + metallb.universe.tf/allow-shared-ip: nginx-ingress +spec: + type: LoadBalancer + selector: + name: matchbox + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 diff --git a/ii/equinix-metal-capi-talos-kubernetes/profile-default-amd64.json b/ii/equinix-metal-capi-talos-kubernetes/profile-default-amd64.json new file mode 100644 index 0000000..9e374f4 --- /dev/null +++ b/ii/equinix-metal-capi-talos-kubernetes/profile-default-amd64.json @@ -0,0 +1,24 @@ +{ + "id": "default-amd64", + "name": "default-amd64", + "boot": { + "kernel": "/assets/talos/vmlinuz-amd64", + "initrd": [ + "/assets/talos/initramfs-amd64.xz" + ], + "args": [ + "initrd=initramfs-amd64.xz", + "init_on_alloc=1", + "init_on_free=1", + "slub_debug=P", + "pti=on", + "random.trust_cpu=on", + "console=tty0", + "console=ttyS1,115200n8", + "slab_nomerge", + "printk.devkmsg=on", + "talos.platform=packet", + "talos.config=none" + ] + } +} diff --git a/ii/equinix-metal-capi-talos-kubernetes/talos-packet-cluster-template.yaml b/ii/equinix-metal-capi-talos-kubernetes/talos-packet-cluster-template.yaml new file mode 100644 index 0000000..80e0310 --- /dev/null +++ b/ii/equinix-metal-capi-talos-kubernetes/talos-packet-cluster-template.yaml @@ -0,0 +1,170 @@ +kind: TalosControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate + name: "${CLUSTER_NAME}-control-plane" + 
controlPlaneConfig: + init: + generateType: init + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.12.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/extraManifests + value: + - https://github.com/equinix/cloud-provider-equinix-metal/releases/download/v3.2.2/deployment.yaml + - op: add + path: /cluster/allowSchedulingOnMasters + value: true + controlplane: + generateType: controlplane + configPatches: + - op: replace + path: /machine/install + value: + disk: /dev/sda + image: ghcr.io/talos-systems/installer:v0.12.1 + bootloader: true + wipe: false + force: false + - op: add + path: /machine/kubelet/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/apiServer/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/controllerManager/extraArgs + value: + cloud-provider: external + - op: add + path: /cluster/allowSchedulingOnMasters + value: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${CONTROLPLANE_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - ${POD_CIDR:=192.168.0.0/16} + services: + cidrBlocks: + - ${SERVICE_CIDR:=172.26.0.0/16} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + 
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + projectID: "${PACKET_PROJECT_ID}" + facility: "${FACILITY}" +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a +spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-a + spec: + version: ${KUBERNETES_VERSION} + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-worker-a + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: PacketMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: PacketMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a +spec: + template: + spec: + OS: custom_ipxe + ipxeURL: "http://${IPXE_SERVER_IP}:8080/ipxe?arch=amd64" + billingCycle: hourly + machineType: "${WORKER_NODE_TYPE}" + sshKeys: + - "${SSH_KEY}" + tags: [] +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: ${CLUSTER_NAME}-worker-a + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} +spec: + template: + spec: + generateType: init diff --git a/ii/equinix-metal-capi-talos-kubernetes/userdata.txt b/ii/equinix-metal-capi-talos-kubernetes/userdata.txt new file mode 100644 index 0000000..98f74ad --- /dev/null +++ b/ii/equinix-metal-capi-talos-kubernetes/userdata.txt @@ -0,0 +1 @@ +#!talos diff --git a/ii/ft-ii-nz/README.org b/ii/ft-ii-nz/README.org new 
file mode 100644 index 0000000..67c0998 --- /dev/null +++ b/ii/ft-ii-nz/README.org @@ -0,0 +1,245 @@ +#+TITLE: ft.ii.nz +#+PROPERTY: header-args:yaml+ :comments none + +#+begin_quote +A FlatTrack instance for ii.nz +#+end_quote + +* Postgres +This is a simple and locked-down Postgres deployment that it'll be reusing, until [[https://github.com/zalando/postgres-operator][Postgres-Operator]] runs in my cluster. At the moment it's quite a challenge and seems uncertain. + +#+begin_src yaml :tangle ./postgres.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres +spec: + replicas: 1 + serviceName: "postgres" + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + restartPolicy: Always + containers: + - name: postgres + image: docker.io/postgres:12.7-alpine + securityContext: + readOnlyRootFilesystem: true + runAsUser: 70 + runAsGroup: 70 + allowPrivilegeEscalation: false + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + - name: var-run-postgresql + mountPath: /var/run/postgresql + - name: tmp + mountPath: /tmp + ports: + - containerPort: 5432 + livenessProbe: + exec: + command: + - "sh" + - "-c" + - "pg_isready" + - "-U" + - "$POSTGRES_USER" + failureThreshold: 5 + periodSeconds: 10 + timeoutSeconds: 5 + env: + - name: POSTGRES_DB + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_DB + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_PASSWORD + - name: PGDATABASE + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGDATABASE + - name: PGUSER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGUSER + - 
name: PGDATA + value: /var/lib/postgresql/data/pgdata + initContainers: + - name: postgres-db-permissions-fix + image: alpine:3.12 + command: + - /bin/sh + - -c + - "/bin/chown -R 70:70 /var/lib/postgresql/data" + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + volumes: + - name: var-lib-postgresql + persistentVolumeClaim: + claimName: postgres-pvc + - name: var-run-postgresql + emptyDir: {} + - name: tmp + emptyDir: {} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - name: "5432" + port: 5432 + targetPort: 5432 +#+end_src +* Deploy a FlatTrack instance +** Prepare +#+begin_src shell :results silent +kubectl create namespace ft-ii-nz --dry-run=client -o yaml \ + | kubectl apply -f - +#+end_src + +** Configure +This configuration: +- log using the public IP in the /X-Real-Ip/ header +- ensure that one Pod is not disrupted +- prefer each Pod to be scheduled on a different node + +#+begin_src yaml :tangle ./postgres-config.yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres-configuration + namespace: ft-ii-nz +stringData: + POSTGRES_DB: flattrack + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + PGDATABASE: flattrack + PGUSER: postgres +#+end_src +#+begin_src yaml :tangle ./flattrack.yaml +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: ft-ii-nz + namespace: ft-ii-nz +spec: + releaseName: ft-ii-nz + chart: + git: https://gitlab.com/flattrack/flattrack.git + ref: 0.0.1-alpha14 + path: deployments/flattrack + values: + realIPHeader: X-Real-Ip + timezone: "Pacific/Auckland" + + image: + tag: latest + # run one on each node + replicaCount: 3 + + podDisruptionBudget: + enabled: true + minAvailable: 1 + + # connect to an existing postgres database + 
postgres: + enabled: true + username: postgres + host: postgres + database: flattrack + passwordSecretRef: + name: postgres-configuration + key: POSTGRES_PASSWORD + + ingress: + enabled: true + certmanager: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + hosts: + - host: ft.ii.nz + paths: + - "/" + tls: + - hosts: + - "ft.ii.nz" + secretName: letsencrypt-prod + + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: beta.kubernetes.io/arch + # operator: In + # values: + # - amd64 +#+end_src + +** Install +#+begin_src shell +kubectl apply -f ./postgres-config.yaml +kubectl -n ft-ii-nz apply -f ./postgres.yaml +until kubectl -n ft-ii-nz wait --for=condition=ready pod --selector=app=postgres --timeout=90s 2>&1 > /dev/null; do + sleep 1s; +done +sleep 3; +kubectl apply -f ./flattrack.yaml +#+end_src + +#+RESULTS: +#+begin_example +secret/postgres-configuration configured +persistentvolumeclaim/postgres-pvc unchanged +statefulset.apps/postgres unchanged +service/postgres unchanged +helmrelease.helm.fluxcd.io/ft-ii-nz configured +#+end_example + +** Observe +#+begin_src shell :wrap "SRC shell" +kubectl -n ft-ii-nz get pods,ingress -o wide +#+end_src diff --git a/ii/ft-ii-nz/flattrack.yaml b/ii/ft-ii-nz/flattrack.yaml new file mode 100644 index 0000000..92c0d76 --- /dev/null +++ b/ii/ft-ii-nz/flattrack.yaml @@ -0,0 +1,58 @@ +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: ft-ii-nz + namespace: ft-ii-nz +spec: + releaseName: ft-ii-nz + chart: + git: https://gitlab.com/flattrack/flattrack.git + ref: 0.0.1-alpha14 + path: deployments/flattrack + values: + realIPHeader: X-Real-Ip + timezone: "Pacific/Auckland" + + image: + tag: latest + # run one on each node + replicaCount: 3 + + podDisruptionBudget: + enabled: true + minAvailable: 1 + + # connect to an existing postgres database + postgres: + enabled: true + username: postgres + host: postgres + 
database: flattrack + passwordSecretRef: + name: postgres-configuration + key: POSTGRES_PASSWORD + + ingress: + enabled: true + certmanager: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + hosts: + - host: ft.ii.nz + paths: + - "/" + tls: + - hosts: + - "ft.ii.nz" + secretName: letsencrypt-prod + + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: beta.kubernetes.io/arch + # operator: In + # values: + # - amd64 diff --git a/ii/ft-ii-nz/postgres-config.yaml b/ii/ft-ii-nz/postgres-config.yaml new file mode 100644 index 0000000..4c3b855 --- /dev/null +++ b/ii/ft-ii-nz/postgres-config.yaml @@ -0,0 +1,18 @@ +# Configure +# This configuration: +# - log using the public IP in the /X-Real-Ip/ header +# - ensure that one Pod is not disrupted +# - prefer each Pod to be scheduled on a different node + + +apiVersion: v1 +kind: Secret +metadata: + name: postgres-configuration + namespace: ft-ii-nz +stringData: + POSTGRES_DB: flattrack + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + PGDATABASE: flattrack + PGUSER: postgres diff --git a/ii/ft-ii-nz/postgres.yaml b/ii/ft-ii-nz/postgres.yaml new file mode 100644 index 0000000..2a5099e --- /dev/null +++ b/ii/ft-ii-nz/postgres.yaml @@ -0,0 +1,126 @@ +# Postgres +# This is a simple and locked-down Postgres deployment that it'll be reusing, until [[https://github.com/zalando/postgres-operator][Postgres-Operator]] runs in my cluster. At the moment it's quite a challenge and seems uncertain. 
+ + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres +spec: + replicas: 1 + serviceName: "postgres" + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + restartPolicy: Always + containers: + - name: postgres + image: docker.io/postgres:12.7-alpine + securityContext: + readOnlyRootFilesystem: true + runAsUser: 70 + runAsGroup: 70 + allowPrivilegeEscalation: false + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + - name: var-run-postgresql + mountPath: /var/run/postgresql + - name: tmp + mountPath: /tmp + ports: + - containerPort: 5432 + livenessProbe: + exec: + command: + - "sh" + - "-c" + - "pg_isready" + - "-U" + - "$POSTGRES_USER" + failureThreshold: 5 + periodSeconds: 10 + timeoutSeconds: 5 + env: + - name: POSTGRES_DB + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_DB + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_PASSWORD + - name: PGDATABASE + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGDATABASE + - name: PGUSER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGUSER + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + initContainers: + - name: postgres-db-permissions-fix + image: alpine:3.12 + command: + - /bin/sh + - -c + - "/bin/chown -R 70:70 /var/lib/postgresql/data" + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + volumes: + - name: var-lib-postgresql + persistentVolumeClaim: + claimName: postgres-pvc + - name: var-run-postgresql + emptyDir: {} + - name: tmp + emptyDir: {} + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - name: "5432" + port: 5432 + targetPort: 5432 diff --git a/ii/google/crt.org b/ii/google/crt.org index 213fa74..35012f4 100644 --- a/ii/google/crt.org +++ b/ii/google/crt.org @@ -969,7 +969,6 @@ https://hh-backend.crt.cncf.ci # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) # eval: (setenv "DNSIMPLE_TOKEN" (shell-command-to-string ". secrets.env ; echo -n $DNSIMPLE_TOKEN") -# org-confirm-babel-evaluate: nil # End: diff --git a/ii/guix.org b/ii/guix.org index f184361..2990745 100644 --- a/ii/guix.org +++ b/ii/guix.org @@ -234,5 +234,4 @@ This creates /gnu/store (see The Store) and /var/guix. The latter contains a rea # eval: (setq socket (concat "/tmp/" user-login-name "." (file-name-base buffer-file-name) ".iisocket")) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # End: diff --git a/ii/iiphone/README.org b/ii/iiphone/README.org new file mode 100644 index 0000000..074c82a --- /dev/null +++ b/ii/iiphone/README.org @@ -0,0 +1,127 @@ +#+TITLE: iiphone +#+PROPERTY: header-args:diff+ :comments none +#+PROPERTY: header-args:dockerfile+ :comments none +#+PROPERTY: header-args:shell+ :prologue "( " :epilogue " ) 2>&1 ; :" :comments none +#+PROPERTY: header-args:text+ :comments none +#+PROPERTY: header-args:tmate+ :comments none +#+PROPERTY: header-args:yaml+ :comments none + +Exploration into something that makes +#+begin_quote +Phones that create instant infrastructure. +#+end_quote + +* Prologue +We have PinePhones, these can run OSes like PostmarketOS. +With PostmarketOS, we are able to run containerd and k8s. + +Through running software like this, we are able to enable the provisioning of local infra. 
+ +* Downloading the OS +PostmarketOS ships two images, a standard and an installer. +As we want to be able to have the OS installed on our fast SD cards, we will use the standard image. + +Downloading an image +#+begin_src shell +VERSION=v21.06 +EDITION=phosh-9 +DATE=20210704-0528 +ASSET="${DATE}-postmarketOS-${VERSION}-${EDITION}-pine64-pinephone.img.xz" + +if [ ! -f "${HOME}/Downloads/${ASSET}" ]; then + curl -o "${HOME}/Downloads/${ASSET}" -L "https://images.postmarketos.org/bpo/${VERSION}/pine64-pinephone/phosh/${DATE}/${DATE}-postmarketOS-${VERSION}-${EDITION}-pine64-pinephone.img.xz" +fi +if [ ! -f "${HOME}/Downloads/${ASSET//\.xz/}" ]; then + unxz -k "${HOME}/Downloads/${ASSET}" +fi +#+end_src + +* Flash the PostmarketOS image onto an SD card +Using the best utility handy to flash the /.img/ file to the SD card. + +* Install repos +#+begin_src shell :dir /ssh:user@192.168.1.237:/ :async yes +cat << EOF | sudo tee -a /etc/apk/repositories +@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing/ +@community http://dl-cdn.alpinelinux.org/alpine/edge/community/ +EOF +#+end_src + +Prepare repos +#+begin_src shell :dir /ssh:user@192.168.1.237:/ :async yes +sudo apk add kubernetes@testing cni-plugins@testing +#+end_src + +* Install a few packages onto the device +Install system packages +#+begin_src shell :dir /ssh:user@192.168.1.237:/ :async yes +sudo apk add containerd containerd-openrc bash curl file tmate@testing +#+end_src + +Install kubeadm and such +#+begin_src shell :dir /ssh:user@192.168.1.237:/ :async yes +sudo apk add kubelet@testing kubeadm@testing kubectl@testing +#+end_src + +* Enable and start services +#+begin_src shell :dir /ssh:user@192.168.1.237:/ :async yes +sudo rc-update add cgroups default +sudo rc-update add containerd default +sudo rc-update add kubelet default +#+end_src + +* Reboot the device +Ensure that /cgroups/ are enabled.
+ +* Set /sysctl/ settings +#+begin_src shell :dir /ssh:user@192.168.1.237:/ :async yes +cat << EOF | sudo tee /etc/modules-load.d/k8s.conf +br_netfilter +EOF + +cat << EOF | sudo tee /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 +EOF +sudo sysctl -p /etc/sysctl.d/k8s.conf +#+end_src + +* Initialise Kubernetes +Declare the /kubeadm/ configuration +#+begin_src yaml +--- +apiServer: +apiVersion: kubeadm.k8s.io/v1beta1 +clusterName: "" +controlPlaneEndpoint: "" +dns: {} +etcd: {} +kind: ClusterConfiguration +kubernetesVersion: v1.21.0 +networking: + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 +scheduler: {} + +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: "" + bindPort: 0 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +cgroupDriver: cgroupfs +#+end_src + +Let /kubeadm/ do its thing +#+begin_src shell :dir /ssh:user@192.168.1.237:/ :async yes +sudo kubeadm init --config kubeadm.yaml --ignore-preflight-errors=SystemVerification +#+end_src + +Install /flannel/ +#+begin_src shell :dir /ssh:user@192.168.1.237:/ :async yes +kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml +#+end_src +(TODO figure out the issue the /ip link/ config not being created, error as /Error registering network: operation not supported/) diff --git a/ii/isocket.org b/ii/isocket.org index 92e8c4a..d7364b4 100644 --- a/ii/isocket.org +++ b/ii/isocket.org @@ -420,14 +420,10 @@ We set tmate-session-prefix to ~rt-~ because the target session is usually on th This means your target session names above should start with rt- # Local Variables: -# org-babel-tmux-session-prefix: "" # org-babel-tmate-session-prefix: "" # eval: (require (quote ob-shell)) # eval: (require (quote ob-lisp)) # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# #eval: (require (quote ob-tmux)) -# 
#eval: (require (quote ob-tmate)) -# org-confirm-babel-evaluate: nil # End: diff --git a/ii/isorespin/cubic.org b/ii/isorespin/cubic.org index a45fcf0..f102087 100644 --- a/ii/isorespin/cubic.org +++ b/ii/isorespin/cubic.org @@ -979,6 +979,5 @@ ln -sf /etc/skel/.emacs.d /root/.emacs.d \ # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/isorespin/livecd.org b/ii/isorespin/livecd.org index 4048da3..6df6d32 100644 --- a/ii/isorespin/livecd.org +++ b/ii/isorespin/livecd.org @@ -623,6 +623,5 @@ RUN ln -sf /etc/skel/.emacs.d /root/.emacs.d \ # eval: (setq socket (concat "/tmp/" user-login-name "." (file-name-base buffer-file-name) ".iisocket")) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/kanboard/README.org b/ii/kanboard/README.org new file mode 100644 index 0000000..eb553bd --- /dev/null +++ b/ii/kanboard/README.org @@ -0,0 +1,195 @@ +#+TITLE: Kanboard deployment + +Install local-path-provisioner +#+begin_src shell +kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml +#+end_src + +#+RESULTS: +#+begin_example +namespace/local-path-storage created +serviceaccount/local-path-provisioner-service-account created +clusterrole.rbac.authorization.k8s.io/local-path-provisioner-role created +clusterrolebinding.rbac.authorization.k8s.io/local-path-provisioner-bind created +deployment.apps/local-path-provisioner created +storageclass.storage.k8s.io/local-path created +configmap/local-path-config created +#+end_example + +Create a namespace +#+begin_src shell +kubectl create ns kanban-ii-coop +#+end_src + +#+RESULTS: +#+begin_example +namespace/kanban-ii-coop created +#+end_example + +Checkout a similar one +#+begin_src 
shell +kubectl -n sharingio-pair get certs sharingio-pair-letsencrypt -o yaml | osc52.sh +#+end_src + +Certs +#+begin_src yaml :tangle ./certs.yaml +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: kanban-ii-coop +spec: + acme: + email: kanban@ii.coop + preferredChain: "" + privateKeySecretRef: + name: letsencrypt-prod + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - http01: + ingress: + class: nginx + selector: + dnsNames: + - kanban.ii.coop +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: kanban-ii-coop + namespace: kanban-ii-coop +spec: + dnsNames: + - kanban.ii.coop + issuerRef: + group: cert-manager.io + kind: ClusterIssuer + name: kanban-ii-coop + secretName: letsencrypt-prod +#+end_src + +PVC +#+begin_src yaml :tangle ./kanboard-pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: kanboard + namespace: kanban-ii-coop +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: local-path +#+end_src + +Deployment +#+begin_src yaml :tangle ./kanboard-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: kanboard + name: kanboard + namespace: kanban-ii-coop +spec: + replicas: 1 + selector: + matchLabels: + app: kanboard + template: + metadata: + labels: + app: kanboard + spec: + containers: + - image: kanboard/kanboard:latest + name: kanboard + ports: + - containerPort: 80 + volumeMounts: + - name: kanboard + mountPath: /var/www/app/data + volumes: + - name: kanboard + persistentVolumeClaim: + claimName: kanboard +#+end_src + +Service +#+begin_src yaml :tangle ./kanboard-service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: kanboard + name: kanboard + namespace: kanban-ii-coop +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: kanboard +#+end_src + +Ingress +#+begin_src yaml :tangle ./kanboard-ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress 
+metadata: + name: kanboard + namespace: kanban-ii-coop +spec: + rules: + - host: kanban.ii.coop + http: + paths: + - backend: + service: + name: kanboard + port: + number: 80 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - kanban.ii.coop + secretName: letsencrypt-prod +#+end_src + +Apply all the things +#+begin_src shell +kubectl apply -f kanboard-deployment.yaml -f kanboard-pvc.yaml -f kanboard-service.yaml -f kanboard-ingress.yaml -f certs.yaml +#+end_src + +#+RESULTS: +#+begin_example +deployment.apps/kanboard unchanged +persistentvolumeclaim/kanboard unchanged +service/kanboard unchanged +ingress.networking.k8s.io/kanboard unchanged +clusterissuer.cert-manager.io/kanban-ii-coop created +certificate.cert-manager.io/kanban-ii-coop created +#+end_example + +Waiting for cert +#+begin_src shell +kubectl -n kanban-ii-coop get challenges +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +Checkout the ingress +#+begin_src shell +kubectl -n kanban-ii-coop get ing +#+end_src + +#+RESULTS: +#+begin_example +NAME CLASS HOSTS ADDRESS PORTS AGE +kanboard kanban.ii.coop 139.178.68.219 80, 443 10m +#+end_example diff --git a/ii/kanboard/certs.yaml b/ii/kanboard/certs.yaml new file mode 100644 index 0000000..aaf8db7 --- /dev/null +++ b/ii/kanboard/certs.yaml @@ -0,0 +1,36 @@ + + +# Certs + +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: kanban-ii-coop +spec: + acme: + email: kanban@ii.coop + preferredChain: "" + privateKeySecretRef: + name: letsencrypt-prod + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - http01: + ingress: + class: nginx + selector: + dnsNames: + - kanban.ii.coop +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: kanban-ii-coop + namespace: kanban-ii-coop +spec: + dnsNames: + - kanban.ii.coop + issuerRef: + group: cert-manager.io + kind: ClusterIssuer + name: kanban-ii-coop + secretName: letsencrypt-prod diff --git a/ii/kanboard/kanboard-deployment.yaml 
b/ii/kanboard/kanboard-deployment.yaml new file mode 100644 index 0000000..3a6eae1 --- /dev/null +++ b/ii/kanboard/kanboard-deployment.yaml @@ -0,0 +1,33 @@ + + +# Deployment + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: kanboard + name: kanboard + namespace: kanban-ii-coop +spec: + replicas: 1 + selector: + matchLabels: + app: kanboard + template: + metadata: + labels: + app: kanboard + spec: + containers: + - image: kanboard/kanboard:latest + name: kanboard + ports: + - containerPort: 80 + volumeMounts: + - name: kanboard + mountPath: /var/www/app/data + volumes: + - name: kanboard + persistentVolumeClaim: + claimName: kanboard diff --git a/ii/kanboard/kanboard-ingress.yaml b/ii/kanboard/kanboard-ingress.yaml new file mode 100644 index 0000000..c0e284b --- /dev/null +++ b/ii/kanboard/kanboard-ingress.yaml @@ -0,0 +1,25 @@ + + +# Ingress + +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kanboard + namespace: kanban-ii-coop +spec: + rules: + - host: kanban.ii.coop + http: + paths: + - backend: + service: + name: kanboard + port: + number: 80 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - kanban.ii.coop + secretName: letsencrypt-prod diff --git a/ii/kanboard/kanboard-pvc.yaml b/ii/kanboard/kanboard-pvc.yaml new file mode 100644 index 0000000..ecabe53 --- /dev/null +++ b/ii/kanboard/kanboard-pvc.yaml @@ -0,0 +1,16 @@ + + +# PVC + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: kanboard + namespace: kanban-ii-coop +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: local-path diff --git a/ii/kanboard/kanboard-service.yaml b/ii/kanboard/kanboard-service.yaml new file mode 100644 index 0000000..95ee5b3 --- /dev/null +++ b/ii/kanboard/kanboard-service.yaml @@ -0,0 +1,18 @@ + + +# Service + +apiVersion: v1 +kind: Service +metadata: + labels: + app: kanboard + name: kanboard + namespace: kanban-ii-coop +spec: + ports: + - port: 80 + protocol: TCP 
+ targetPort: 80 + selector: + app: kanboard diff --git a/ii/kanboard/kanboard.yaml b/ii/kanboard/kanboard.yaml new file mode 100644 index 0000000..4a58568 --- /dev/null +++ b/ii/kanboard/kanboard.yaml @@ -0,0 +1,25 @@ + + +# Deployment + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: kanboard + name: kanboard +spec: + replicas: 1 + selector: + matchLabels: + app: kanboard + template: + metadata: + labels: + app: kanboard + spec: + containers: + - image: kanboard/kanboard:latest + name: kanboard + ports: + - containerPort: 80 diff --git a/ii/legalhackers/better-rules.org b/ii/legalhackers/better-rules.org index 8009cb0..a8847c9 100644 --- a/ii/legalhackers/better-rules.org +++ b/ii/legalhackers/better-rules.org @@ -422,7 +422,6 @@ invalid argument "MountPropagation=true,Auditing=true" for "--feature-gates" fla # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# org-confirm-babel-evaluate: nil # End: diff --git a/ii/legalhackers/gitlab.org b/ii/legalhackers/gitlab.org index f909499..980e6bb 100644 --- a/ii/legalhackers/gitlab.org +++ b/ii/legalhackers/gitlab.org @@ -913,6 +913,5 @@ right: $right_session # eval: (setq socket (concat "/tmp/" user-login-name "." 
(file-name-base buffer-file-name) ".iisocket")) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/legalhackers/legalhackers.ii.nz.org b/ii/legalhackers/legalhackers.ii.nz.org index cf86711..db1322c 100644 --- a/ii/legalhackers/legalhackers.ii.nz.org +++ b/ii/legalhackers/legalhackers.ii.nz.org @@ -325,6 +325,5 @@ kubectl get secret gitlab-gitlab-initial-root-password -ojsonpath={.data.passwor # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/legalhackers/recode.foo.bar.org b/ii/legalhackers/recode.foo.bar.org index 6919d99..a46fe72 100644 --- a/ii/legalhackers/recode.foo.bar.org +++ b/ii/legalhackers/recode.foo.bar.org @@ -1221,6 +1221,5 @@ https://gitlab.com/charts/gitlab/blob/master/doc/installation/secrets.md#registr # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/legalhackers/recode.ii.nz.example.org b/ii/legalhackers/recode.ii.nz.example.org index bd3cd4e..331caf0 100644 --- a/ii/legalhackers/recode.ii.nz.example.org +++ b/ii/legalhackers/recode.ii.nz.example.org @@ -158,5 +158,4 @@ kubectl get secret gitlab-gitlab-initial-root-password -ojsonpath={.data.passwor # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# org-confirm-babel-evaluate: nil # End: diff --git a/ii/legalhackers/recode.ii.nz.org b/ii/legalhackers/recode.ii.nz.org index 7991482..9335a0a 100644 --- a/ii/legalhackers/recode.ii.nz.org +++ b/ii/legalhackers/recode.ii.nz.org @@ -1470,6 +1470,5 @@ A script to generate them exists here: # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # 
org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/legalhackers/rook.org b/ii/legalhackers/rook.org index 81eb578..8ae7702 100644 --- a/ii/legalhackers/rook.org +++ b/ii/legalhackers/rook.org @@ -302,6 +302,5 @@ Beware your copy paste methods and try pasting into a notepad first. # eval: (setq socket (concat "/tmp/" user-login-name "." (file-name-base buffer-file-name) ".iisocket")) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/ii/local-cluster/README.org b/ii/local-cluster/README.org new file mode 100644 index 0000000..307d61d --- /dev/null +++ b/ii/local-cluster/README.org @@ -0,0 +1,1608 @@ +#+TITLE: Local cluster +#+PROPERTY: header-args:diff+ :comments none +#+PROPERTY: header-args:dockerfile+ :comments none +#+PROPERTY: header-args:shell+ :prologue "( " :epilogue " ) 2>&1 ; :" :comments none +#+PROPERTY: header-args:text+ :comments none +#+PROPERTY: header-args:tmate+ :comments none +#+PROPERTY: header-args:yaml+ :comments none + +Setting up a local cluster. + +* Prologue +* Prepare +** Save repo location +#+begin_src tmate :window prepare +export REPO_ROOT="${PWD}" +#+end_src + +** Downloading a Talos RPi image +Download the Talos image to flash to a MicroSD card from GitHub +#+begin_src tmate :window prepare +cd $(mktemp -d) +curl -O -L \ + https://github.com/talos-systems/talos/releases/download/v0.10.4/metal-rpi_4-arm64.img.xz +export TALOS_METAL_RPI_IMG=${PWD}/* +#+end_src + +Some Pis may require having the EEPROM updated, check [[https://www.talos.dev/docs/v0.10/single-board-computers/rpi_4/#updating-the-eeprom][the Talos docs]]. 
+ +** Prepare MicroSD cards +Write the image to a MicroSD card +#+begin_src tmate :window prepare +export DISK_TO_USE_DEFAULT=/dev/sdb && \ + read -p "Enter the disk to use (default: '${DISK_TO_USE_DEFAULT}'): " DISK_TO_USE && \ + sudo dd \ + if=${TALOS_METAL_RPI_IMG} \ + of="${DISK_TO_USE:-$DISK_TO_USE_DEFAULT}" \ + status=progress \ + conv=fsync \ + bs=4M +#+end_src + +** Install =talosctl= +To manage Talos on each node, =talosctl= is used to provision and manage +#+begin_src tmate :window prepare +curl -o ~/bin/talosctl -L \ + https://github.com/talos-systems/talos/releases/download/v0.11.0/talosctl-$(uname | tr '[:upper:]' '[:lower:]')-amd64 +chmod +x ~/bin/talosctl +#+end_src + +* Set up +** kind +Declare the kind config +#+begin_src yaml :tangle ./kind-config.yaml +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + extraPortMappings: + - containerPort: 67 + hostPort: 67 + protocol: UDP + - containerPort: 69 + hostPort: 69 + protocol: UDP + - containerPort: 80 + hostPort: 80 + protocol: TCP + - containerPort: 443 + hostPort: 443 + protocol: TCP + - containerPort: 4011 + hostPort: 4011 + protocol: UDP + - containerPort: 7472 + hostPort: 7472 + protocol: UDP + - containerPort: 8081 + hostPort: 8081 + protocol: TCP + kubeadmConfigPatches: + - | + apiVersion: kubeadm.k8s.io/v1beta2 + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" +#+end_src + +Using kind /v0.11.1/ or greater, launch the cluster +#+begin_src tmate :window prepare +export HOST_IP=$(ip a | grep 192.168 | awk '{print $2}' | cut -d '/' -f1 | head -n 1) +kind create cluster --config <(envsubst < ./kind-config.yaml) +#+end_src + +#+begin_src tmate :window prepare +export HOST_IP=$(ip a | grep 192.168 | awk '{print $2}' | cut -d '/' -f1 | head -n 1) +talosctl cluster create \ + -p 67:67/udp,69:69/udp,80:80/tcp,443:443/tcp,4011:4011/udp,7472:7472/tcp,8081:8081/tcp \ + --workers 0 \ + --config-patch '[{"op": "add", "path": 
"/cluster/allowSchedulingOnMasters", "value": true}]' \ + --endpoint "${HOST_IP}" +#+end_src + +Install nginx-ingress-controller +#+begin_src shell +VERSION=$(curl -s https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/stable.txt) +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/${VERSION}/deploy/static/provider/kind/deploy.yaml +#+end_src + +#+RESULTS: +#+begin_example +namespace/ingress-nginx created +serviceaccount/ingress-nginx created +configmap/ingress-nginx-controller created +clusterrole.rbac.authorization.k8s.io/ingress-nginx created +clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created +role.rbac.authorization.k8s.io/ingress-nginx created +rolebinding.rbac.authorization.k8s.io/ingress-nginx created +service/ingress-nginx-controller-admission created +service/ingress-nginx-controller created +deployment.apps/ingress-nginx-controller created +validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created +serviceaccount/ingress-nginx-admission created +clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created +clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created +role.rbac.authorization.k8s.io/ingress-nginx-admission created +rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created +job.batch/ingress-nginx-admission-create created +job.batch/ingress-nginx-admission-patch created +#+end_example + +Set /NODE_ADDRS/ var +#+begin_src tmate :window prepare +export NODE_ADDRS=$(ip a | grep 192.168 | awk '{print $2}' | cut -d '/' -f1 | head -n 1) +#+end_src + +** a Talos node +*** Discover node IPs +Each node that comes up will, of course, have an IP address. +I'm checking what the router says. 
*** Declare node IP addresses
+- network: {} ++ network: ++ interfaces: ++ - interface: eth0 ++ dhcp: true ++ vip: ++ ip: 192.168.1.100 + # # `interfaces` is used to define the network interface configuration. + # interfaces: + # - interface: eth0 # The interface name. +@@ -214,6 +219,7 @@ machine: + # slot: 0 # Key slot number for luks2 encryption. + # Provides cluster specific configuration options. + cluster: ++ allowSchedulingOnMasters: true + # Provides control plane specific configuration options. + controlPlane: + endpoint: https://192.168.1.100:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname. + +#+end_src + +Apply patches +#+begin_src tmate :window prepare +patch -ruN -d talos/ < "${REPO_ROOT}/talos-config-patches.patch" +#+end_src + +*** Use talosconfig +#+NAME: export-talosconfig +#+begin_src tmate :window prepare +export TALOSCONFIG=$PWD/talos/talosconfig +#+end_src + +Write the endpoint +#+begin_src tmate :window prepare +talosctl config endpoint 192.168.1.100 +#+end_src + +*** Provisioning the first node +#+begin_src tmate :window prepare +talosctl apply-config --insecure --nodes "${NODE_ADDRS[0]}" --file talos/init.yaml +#+end_src + +**** Ensure that the node is active +#+begin_src tmate :window prepare +talosctl health -e "${NODE_ADDRS[0]}" -n "${NODE_ADDRS[0]}" +#+end_src + +*** Provision all the nodes +#+begin_src tmate :window prepare +for IP in ${NODE_ADDRS[*]}; do + talosctl apply-config --insecure --nodes "${IP}" --file talos/controlplane.yaml +done +#+end_src + +**** Watch the health of all nodes, as they become active +#+begin_src tmate :window prepare +talosctl health -e "${NODE_ADDRS[0]}" -n "${NODE_ADDRS[0]}" +#+end_src + +*** Get kubeconfig +#+begin_src tmate :window prepare +talosctl kubeconfig -e 192.168.1.100 -n 192.168.1.100 +#+end_src + +*** Get nodes +#+begin_src shell +kubectl get nodes +#+end_src + +#+RESULTS: +#+begin_example +NAME STATUS ROLES AGE VERSION +talos-192-168-1-111 Ready control-plane,master 16m 
v1.21.1 +talos-192-168-1-127 Ready control-plane,master 8m2s v1.21.1 +talos-192-168-1-234 Ready control-plane,master 7m43s v1.21.1 +#+end_example + +* Validate +** Get pods +#+begin_src shell +kubectl get pods -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-fcc4c97fb-br6rd 1/1 Running 0 17m +kube-system coredns-fcc4c97fb-cfstz 1/1 Running 0 17m +kube-system kube-apiserver-talos-192-168-1-111 1/1 Running 0 14m +kube-system kube-apiserver-talos-192-168-1-127 1/1 Running 0 7m23s +kube-system kube-apiserver-talos-192-168-1-234 1/1 Running 0 7m55s +kube-system kube-controller-manager-talos-192-168-1-111 1/1 Running 3 15m +kube-system kube-controller-manager-talos-192-168-1-127 1/1 Running 0 7m23s +kube-system kube-controller-manager-talos-192-168-1-234 1/1 Running 0 7m55s +kube-system kube-flannel-5stx9 1/1 Running 0 8m16s +kube-system kube-flannel-9kcx2 1/1 Running 0 7m56s +kube-system kube-flannel-wxn5m 1/1 Running 0 16m +kube-system kube-proxy-6dzrl 1/1 Running 0 7m56s +kube-system kube-proxy-pb42s 1/1 Running 0 8m16s +kube-system kube-proxy-w5q56 1/1 Running 0 16m +kube-system kube-scheduler-talos-192-168-1-111 1/1 Running 3 15m +kube-system kube-scheduler-talos-192-168-1-127 1/1 Running 0 7m23s +kube-system kube-scheduler-talos-192-168-1-234 1/1 Running 0 7m55s +#+end_example + +* Ensure set up +** Upload talos folder into Kubernetes secret +#+begin_src tmate :window prepare +kubectl -n kube-system create secret generic "talos-config" --from-file=talos/ +#+end_src + +Ensure that the files exist in the secret +#+begin_src shell +kubectl -n kube-system get secret talos-config -o yaml | yq e '.data | keys | .[]' -P - +#+end_src + +#+RESULTS: +#+begin_example +controlplane.yaml +init.yaml +join.yaml +talosconfig +#+end_example + +** Fetch Talos configs +Create a new temp directory +#+begin_src tmate :window prepare +cd $(mktemp -d) +#+end_src + +Extract talos-config into directory +#+begin_src tmate :window 
prepare :noweb yes +TALOS_CONFIGS="$(mktemp -t talos-config-XXXXX)" +kubectl -n kube-system get secret talos-config -o yaml > "${TALOS_CONFIGS}" + +mkdir -p talos/ +for FILE in $(cat "${TALOS_CONFIGS}" | yq e '.data | keys | .[]' -P -); do + echo $FILE + cat "${TALOS_CONFIGS}" | yq e ".data.\"${FILE}\"" -P - | base64 --decode > "talos/${FILE}" +done +<> +#+end_src + +** Get node IPs from the cluster +#+begin_src tmate :window prepare +export NODE_ADDRS=$(kubectl get nodes -o yaml | yq e '.items[].status.addresses[] | select(.type=="InternalIP") | .address' -P -) +#+end_src + +** Get the TalosConfig +#+begin_src tmate :window prepare +export TALOSCONFIG=$(mktemp /tmp/tmp.XXXXX) +kubectl -n local-clusters get talosconfig -l cluster.x-k8s.io/cluster-name=local-cluster-mgmt -o=jsonpath='{.items[0].status.talosConfig}' > "${TALOSCONFIG}" +#+end_src + +** Get machinetype +#+begin_src tmate :window prepare +talosctl -e 192.168.1.100 -n "$(echo ${NODE_ADDRS} | tr ' ' ',')" get machinetype +#+end_src + +** Shutdown RPis +#+begin_src tmate :window prepare +for IP in ${NODE_ADDRS[*]}; do + talosctl shutdown -e 192.168.1.100 -n "${IP}" +done +#+end_src + +** Reset all nodes to uninitialised Talos +#+begin_src tmate :window prepare +read -p "Are you sure you want to reset all nodes, effectively destroying the cluster? 
[Enter|C-c] " && \ +( + for IP in ${NODE_ADDRS[*]}; do + talosctl -e "${IP}" -n "${IP}" reset --graceful=false --reboot --system-labels-to-wipe=EPHEMERAL,STATE + done +) +#+end_src + +* Workloads +** metallb +*** Prepare +Create a directory for the manifests and a namespace for the resources +#+begin_src shell :results silent +mkdir -p metallb +curl -o metallb/namespace.yaml -L https://raw.githubusercontent.com/metallb/metallb/v0.9.6/manifests/namespace.yaml +curl -o metallb/metallb.yaml -L https://raw.githubusercontent.com/metallb/metallb/v0.9.6/manifests/metallb.yaml +#+end_src + +*** Configure +Using layer2 for ARP capabilities and provide a very sufficient 10 IP address range in a part of the network that is configure to not be used by DHCP. +#+begin_src yaml :tangle ./metallb/config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - 192.168.1.20-192.168.1.30 +#+end_src + +*** Install +#+begin_src shell +kubectl apply -f metallb/namespace.yaml +kubectl -n metallb-system get secret memberlist 2> /dev/null \ + || kubectl -n metallb-system create secret generic memberlist --from-literal=secretkey="$(openssl rand -base64 128)" +kubectl -n metallb-system apply -f ./metallb/config.yaml +kubectl -n metallb-system apply -f ./metallb/metallb.yaml +#+end_src + +#+RESULTS: +#+begin_example +namespace/metallb-system created +secret/memberlist created +configmap/config created +Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ +podsecuritypolicy.policy/controller created +podsecuritypolicy.policy/speaker created +serviceaccount/controller created +serviceaccount/speaker created +clusterrole.rbac.authorization.k8s.io/metallb-system:controller created +clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created +role.rbac.authorization.k8s.io/config-watcher created 
+role.rbac.authorization.k8s.io/pod-lister created +clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created +clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created +rolebinding.rbac.authorization.k8s.io/config-watcher created +rolebinding.rbac.authorization.k8s.io/pod-lister created +daemonset.apps/speaker created +deployment.apps/controller created +#+end_example + +** Helm-Operator +Unfortunately the Helm-Operator project by FluxCD is both in maintenance mode and unsupported on arm64. Here in the prepare stage, I'm patching the current state of how things are to build an arm64 image. Ideally, this is all in a single Dockerfile and does not use Make scripts. I'm unsure what the future of Helm-Operator is, but I'd like to see and help support for architectures outta-the-box. + +*** Prepare +Create a directory for the manifests and a namespace for the resources +#+begin_src shell :results silent +mkdir -p helm-operator +kubectl create namespace helm-operator --dry-run=client -o yaml \ + | kubectl apply -f - +#+end_src + +*** Configure +Create local manifests to apply in the cluster +#+begin_src shell :results silent +curl -o ./helm-operator/helm-operator-crds.yaml -L https://raw.githubusercontent.com/fluxcd/helm-operator/1.2.0/deploy/crds.yaml + +helm repo add fluxcd https://charts.fluxcd.io +helm template helm-operator --create-namespace fluxcd/helm-operator \ + --namespace helm-operator \ + --set helm.versions=v3 \ + --set image.repository=registry.gitlab.com/bobymcbobs/container-images/helm-operator \ + --set image.tag=1.2.0 \ + > ./helm-operator/helm-operator.yaml +#+end_src + +*** Install +#+begin_src shell +kubectl apply -f ./helm-operator/helm-operator-crds.yaml +kubectl -n helm-operator apply -f ./helm-operator/helm-operator.yaml +#+end_src + +#+RESULTS: +#+begin_example +Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 
CustomResourceDefinition +customresourcedefinition.apiextensions.k8s.io/helmreleases.helm.fluxcd.io configured +serviceaccount/helm-operator unchanged +secret/helm-operator-git-deploy unchanged +configmap/helm-operator-kube-config unchanged +Warning: rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole +clusterrole.rbac.authorization.k8s.io/helm-operator unchanged +Warning: rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding +clusterrolebinding.rbac.authorization.k8s.io/helm-operator unchanged +service/helm-operator unchanged +deployment.apps/helm-operator configured +#+end_example + +** nginx-ingress controller +*** Prepare + +Create a directory for the manifests and a namespace for the resources +#+begin_src shell :results silent +mkdir -p nginx-ingress +kubectl create namespace nginx-ingress --dry-run=client -o yaml \ + | kubectl apply -f - +#+end_src + +*** Configure +Ensuring that remote IP addresses will be forwarded as headers in the requests, using the fields in the /.spec.values.controller.service/ field. +Preferring that each nginx-ingress pod runs on a different node. 
+#+begin_src yaml :tangle ./nginx-ingress/nginx-ingress.yaml +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: nginx-ingress + namespace: nginx-ingress +spec: + releaseName: nginx-ingress + chart: + repository: https://kubernetes.github.io/ingress-nginx + name: ingress-nginx + version: 3.30.0 + values: + controller: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - ingress-nginx + topologyKey: "kubernetes.io/hostname" + service: + type: LoadBalancer + externalTrafficPolicy: Local + defaultBackend: + enabled: false +#+end_src + +*** Install +#+begin_src shell +kubectl -n nginx-ingress apply -f nginx-ingress/nginx-ingress.yaml +#+end_src + +#+RESULTS: +#+begin_example +helmrelease.helm.fluxcd.io/nginx-ingress created +#+end_example + +** local-path-provisioner +Currently used, to get-the-job-done. +My end goal is to use Rook+Ceph in-place, but I'm starting with this. + +*** Prepare +Create a directory for the manifests and a namespace for the resources. 
+#+begin_src shell :results silent +mkdir -p local-path-provisioner +curl -o local-path-provisioner/local-path-provisioner.yaml -L https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml +#+end_src + +*** Install +#+begin_src shell +kubectl apply -f local-path-provisioner/local-path-provisioner.yaml +#+end_src + +#+RESULTS: +#+begin_example +namespace/local-path-storage created +serviceaccount/local-path-provisioner-service-account created +clusterrole.rbac.authorization.k8s.io/local-path-provisioner-role created +clusterrolebinding.rbac.authorization.k8s.io/local-path-provisioner-bind created +deployment.apps/local-path-provisioner created +storageclass.storage.k8s.io/local-path created +configmap/local-path-config created +#+end_example + +*** Finalise +Ensuring that local-path is the default StorageClass. +#+begin_src shell +kubectl patch storageclasses.storage.k8s.io local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' +#+end_src + +#+RESULTS: +#+begin_example +storageclass.storage.k8s.io/local-path patched +#+end_example + +** CAPI + Sidero +Links: +- https://www.sidero.dev/docs/v0.3/getting-started/install-clusterapi/ +- https://www.sidero.dev/docs/v0.3/guides/rpi4-as-servers/#rpi4-boot-process + +*** Configure +#+begin_src yaml :tangle ./sidero-controller-manager-debug.yaml +apiVersion: v1 +kind: Pod +metadata: + name: sidero-debug + namespace: sidero-system +spec: + hostNetwork: true + containers: + - image: alpine:3.12 + name: sidero-debug + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/sidero/tftp + name: tftp-folder + command: + - sh + - -c + - apk add tar && sleep infinity + volumes: + - name: tftp-folder + persistentVolumeClaim: + claimName: sidero-tftp +#+end_src +#+begin_src yaml :tangle ./sidero-controller-manager-tftp-pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: sidero-tftp + namespace: 
sidero-system +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +#+end_src +#+begin_src yaml :tangle ./sidero-controller-manager-patch.yaml +spec: + template: + spec: + volumes: + - name: tftp-folder + persistentVolumeClaim: + claimName: sidero-tftp + containers: + - name: manager + volumeMounts: + - mountPath: /var/lib/sidero/tftp + name: tftp-folder +#+end_src +- TODO Sidero TFTP, for UEFI boot + - share the /var/lib/sidero/tftp folder as a PVC with a alpine pod +- TODO copy UEFI boot into TFTP folder and RPI_EFI.fd from SD card + +*** Install +#+begin_src tmate :window prepare +export SIDERO_METADATA_SERVER_HOST_NETWORK=true \ + SIDERO_METADATA_SERVER_PORT=9091 \ + SIDERO_CONTROLLER_MANAGER_API_ENDPOINT="${HOST_IP:-192.168.1.21}" \ + SIDERO_CONTROLLER_MANAGER_AUTO_ACCEPT_SERVERS=true \ + SIDERO_CONTROLLER_MANAGER_HOST_NETWORK=false \ + SIDERO_CONTROLLER_MANAGER_BOOT_FROM_DISK_METHOD=http-404 + +clusterctl init -b talos -c talos -i sidero:v0.3.0 +#+end_src + +*** Finalise +(metal-only) Assign a virtal IP to the sidero-http service +#+begin_src shell +kubectl -n sidero-system patch service sidero-http -p '{"spec":{"type":"LoadBalancer"}}' +#+end_src + +#+RESULTS: +#+begin_example +service/sidero-http patched +#+end_example + +(kind-only) Assign an external IP to sidero-http +#+begin_src shell +export KIND_IP="$(docker inspect kind-control-plane -f '{{.NetworkSettings.Networks.kind.IPAddress}}')" +kubectl -n sidero-system patch service sidero-http -p "{\"spec\":{\"externalIPs\":[\"${KIND_IP}\"]}}" +#+end_src + +#+RESULTS: +#+begin_example +service/sidero-http patched +#+end_example + +Check the IP address +#+begin_src shell +kubectl -n sidero-system get svc +#+end_src + +#+RESULTS: +#+begin_example +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +sidero-controller-manager-metrics-service ClusterIP 10.100.182.164 8443/TCP 77s +sidero-http LoadBalancer 10.105.234.143 192.168.1.21 8081:30367/TCP 77s +sidero-tftp ClusterIP 10.100.74.148 
69/UDP 77s +#+end_example + +Expose Sidero-HTTP as a HTTPs Ingress +#+begin_src yaml :tangle ./ingress-boot-ii-nz.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: boot-ii-nz + namespace: sidero-system +spec: + rules: + - host: boot.ii.nz + http: + paths: + - backend: + service: + name: sidero-http + port: + number: 8081 + path: / + pathType: ImplementationSpecific +#+end_src + +Apply the ingress +#+begin_src shell +kubectl apply -f ./ingress-boot-ii-nz.yaml +#+end_src + +#+RESULTS: +#+begin_example +ingress.networking.k8s.io/boot-ii-nz created +#+end_example + +Create a PVC for the TFTP folder +#+begin_src shell +# TODO figure out how to use dnsmasq/dhcp for just PXE and Sidero CM for TFTP +kubectl apply -f ./sidero-controller-manager-tftp-pvc.yaml +kubectl -n sidero-system patch deployment sidero-controller-manager --patch-file ./sidero-controller-manager-patch.yaml +kubectl -n sidero-system delete pod -l app=sidero 2> /dev/null +#+end_src + +#+RESULTS: +#+begin_example +persistentvolumeclaim/sidero-tftp created +deployment.apps/sidero-controller-manager patched +pod "caps-controller-manager-5948c84db7-vbwhm" deleted +pod "sidero-controller-manager-565796bc46-9xzhx" deleted +pod "sidero-controller-manager-76c76bdc8d-s2f7q" deleted +#+end_example + +Create a Pod that's also got the TFTP mount +#+begin_src shell +kubectl -n sidero-system delete pod sidero-debug 2> /dev/null +kubectl apply -f ./sidero-controller-manager-debug.yaml +#+end_src + +#+RESULTS: +#+begin_example +pod/sidero-debug created +#+end_example + +Czech the content +#+begin_src shell +kubectl -n sidero-system exec -it sidero-debug -- ls -alh /var/lib/sidero/tftp/ +#+end_src + +#+RESULTS: +#+begin_example +Unable to use a TTY - input is not a terminal or the right kind of file +total 2M +drwxrwxrwx 2 root root 6 Jul 8 20:49 . +drwxr-xr-x 3 root root 3 Jul 8 20:49 .. 
+-rw-r--r-- 1 root root 968.6K Jul 8 20:49 ipxe-arm64.efi +-rw-r--r-- 1 root root 996.5K Jul 8 20:49 ipxe.efi +-rw-r--r-- 1 root root 81.0K Jul 8 20:49 undionly.kpxe +-rw-r--r-- 1 root root 81.0K Jul 8 20:49 undionly.kpxe.0 +#+end_example + +Copy assets in-place +#+begin_src tmate :window prepare +kubectl -n sidero-system cp sidero-debug:/var/lib/sidero /tmp/ +#+end_src +(this will be used for uploading the TFTP root for DNSMASQ) + +*** Debug +Logs +#+begin_src tmate :window sidero +kubectl -n sidero-system logs -l app=sidero -f +#+end_src + +Scale to zero +#+begin_src shell +kubectl -n sidero-system scale deployment sidero-controller-manager --replicas=0 +#+end_src + +#+RESULTS: +#+begin_example +deployment.apps/sidero-controller-manager scaled +#+end_example + +Scale to one +#+begin_src shell +kubectl -n sidero-system scale deployment sidero-controller-manager --replicas=1 +#+end_src + +*** Remove +#+begin_src tmate :window prepare +clusterctl delete --all +#+end_src +(useful for iterating) + +** PXE boot server (dnsmasq) +*** Prepare +#+begin_src shell :results silent +mkdir -p dnsmasq +kubectl create namespace dnsmasq --dry-run=client -o yaml | \ + kubectl apply -f - +#+end_src + +*** Configure +Configure dnsmasq +#+begin_src text :tangle ./dnsmasq/dnsmasq.conf :comments none +#dnsmasq config, for a complete example, see: +# http://oss.segetech.com/intra/srv/dnsmasq.conf + +port=0 +dhcp-range=${DHCP_RANGE},proxy +pxe-service=0,"Raspberry Pi Boot" +pxe-prompt="PXE booting Talos from Sidero in",0 +dhcp-boot=ipxe-arm64.efi,sidero +log-queries +log-dhcp + +enable-tftp=* +tftp-root=/var/lib/sidero/tftp +#+end_src + +Configure the container +#+begin_src dockerfile :tangle ./dnsmasq/Dockerfile :comments none +FROM alpine:3.12 AS final +RUN apk add --no-cache tcpdump curl dnsmasq-dnssec gettext bash +# TODO run as non-root +RUN mkdir -p /etc/default/ && \ + echo -e "ENABLED=1\nIGNORE_RESOLVCONF=yes" > /etc/default/dnsmasq +ENTRYPOINT ["dnsmasq","--no-daemon"] 
+#+end_src + +TFTP PVC +#+begin_src yaml :tangle ./dnsmasq/dnsmasq-pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: dnsmasq-tftp + namespace: dnsmasq +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +#+end_src + +Configure the deployment +#+begin_src yaml :tangle ./dnsmasq/dnsmasq.yaml :comments none +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dnsmasq + namespace: dnsmasq + labels: + nz.ii: dnsmasq + app: dnsmasq +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + nz.ii: dnsmasq + template: + metadata: + annotations: + nz.ii/dnsmasq.conf-sha256sum: "${DNSMASQ_CONF_HASH}" + nz.ii/dockerfile-sha256sum: "${DOCKERFILE_HASH}" + labels: + nz.ii: dnsmasq + app: dnsmasq + spec: + hostNetwork: true + containers: + - name: dnsmasq + image: registry.gitlab.com/ii/nz/dnsmasq:latest + imagePullPolicy: Always + volumeMounts: + - name: config + mountPath: /etc/dnsmasq + - name: tftp-folder + mountPath: /var/lib/sidero/tftp + env: + - name: DHCP_RANGE + value: "${DHCP_RANGE}" + command: + - bash + - -x + - -c + - dnsmasq --no-daemon -C <(envsubst < /etc/dnsmasq/dnsmasq.conf) + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + - SYS_ADMIN + ports: + - containerPort: 67 + hostPort: 67 + protocol: UDP + - containerPort: 4011 + hostPort: 4011 + protocol: UDP + - containerPort: 7472 + hostPort: 7472 + protocol: UDP + volumes: + - name: config + configMap: + name: dnsmasq-config + - name: tftp-folder + persistentVolumeClaim: + claimName: dnsmasq-tftp +#+end_src + +*** Build +**** Build for the target architecture +#+begin_src tmate :window dnsmasq +kubectl build \ + --destination registry.gitlab.com/ii/nz/dnsmasq:latest \ + --snapshotMode=redo \ + --context=$PWD \ + --dockerfile ./dnsmasq/Dockerfile +#+end_src + +**** Build for /amd64/ and /arm64/ +Prepare (1/2): prepare binfmt files +#+begin_src tmate :window dnsmasq +docker run --privileged --rm tonistiigi/binfmt --install 
all +#+end_src + +Prepare (2/2): create a builder +#+begin_src tmate :window dnsmasq +docker buildx create --use +#+end_src + +Build +#+begin_src tmate :window dnsmasq +docker buildx build \ + --platform linux/arm64/v8,linux/amd64 \ + --push \ + --tag registry.gitlab.com/ii/nz/dnsmasq:latest \ + --file ./dnsmasq/Dockerfile \ + ./dnsmasq/ +#+end_src +(dependencies: docker-ce=>20.10.6, docker-buildx=>0.3.1, qemu-user-static, binfmt-support) + +*** Install +#+begin_src shell +kubectl -n dnsmasq create configmap dnsmasq-config --from-file=dnsmasq/dnsmasq.conf --dry-run=client -o yaml | \ + kubectl apply -f - +export DNSMASQ_CONF_HASH="$(sha256sum ./dnsmasq/dnsmasq.conf | awk '{print $1}')" +export DOCKERFILE_HASH="$(sha256sum ./dnsmasq/Dockerfile | awk '{print $1}')" +if [ "$(kubectl config current-context)" = "kind-kind" ]; then + DHCP_RANGE="$(docker network inspect -f '{{json .IPAM.Config}}' kind | jq -r .[0].Subnet | cut -d / -f1)" +fi +export DHCP_RANGE="${DHCP_RANGE:-192.168.1.0}" +echo "DHCP_RANGE: ${DHCP_RANGE}" +kubectl apply -f ./dnsmasq/dnsmasq-pvc.yaml +envsubst < ./dnsmasq/dnsmasq.yaml | kubectl apply -f - +#+end_src + +#+RESULTS: +#+begin_example +configmap/dnsmasq-config configured +DHCP_RANGE: 192.168.1.0 +persistentvolumeclaim/dnsmasq-tftp unchanged +deployment.apps/dnsmasq unchanged +#+end_example + +*** Validate +Get logs +#+begin_src tmate :window dnsmasq +kubectl -n dnsmasq logs -l app=dnsmasq --prefix -f +#+end_src + +#+begin_src tmate :window prepare +PORTS=(67 69) +for IP in ${NODE_ADDRS[*]}; do + for PORT in ${PORTS[*]}; do + echo "Checking ${IP}:${PORT}" + nc -zvu "${IP}" "${PORT}" || " port '${PORT}' inaccessible" + done +done +#+end_src + +#+begin_src yaml :tangle ./dnsmasq/debug-pod.yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + run: dnsmasq-debug + name: dnsmasq-debug + namespace: dnsmasq +spec: + hostNetwork: true + containers: + - image: alpine:3.12 + name: dnsmasq-debug + securityContext: + privileged: true + command: + - sleep 
+ - infinity + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - dnsmasq + topologyKey: "kubernetes.io/hostname" + dnsPolicy: ClusterFirst + restartPolicy: Always +#+end_src + +#+begin_src shell +kubectl delete -f ./dnsmasq/debug-pod.yaml +#+end_src + +Drop a shell +#+begin_src tmate :window tcpdump +kubectl -n dnsmasq exec -it dnsmasq-debug -- sh +#+end_src + +Install tcpdump +#+begin_src tmate :window tcpdump +apk add tcpdump +#+end_src + +List interfaces +#+begin_src tmate :window tcpdump +ip a +#+end_src + +We'll use eth0, since that's the host network for the Pi + +Scale to zero +#+begin_src shell +kubectl -n dnsmasq scale deployment dnsmasq --replicas=0 +#+end_src + +#+RESULTS: +#+begin_example +deployment.apps/dnsmasq scaled +#+end_example + +Scale to one +#+begin_src shell +kubectl -n dnsmasq scale deployment dnsmasq --replicas=1 +#+end_src + +#+RESULTS: +#+begin_example +deployment.apps/dnsmasq scaled +#+end_example + +*** Prepare assets +Download UEFI assets +#+begin_src tmate :window prepare +FILES="start4.elf fixup4.dat config.txt ipxe.efi ipxe-arm64.efi firmware overlays bcm2711-rpi-4-b.dtb" + +cd /var/lib/sidero/tftp +for UUID in ${UUIDs}; do + echo "${UUID}:" + mkdir -p ${UUID} + cd ${UUID} + rm * + for FILE in $FILES; do + echo "- ${FILE}" + ln -sf ../$FILE ./$FILE + done + cd - +done +VERSION=v1.28 +ASSET=RPi4_UEFI_Firmware_${VERSION}.zip +EXTRACTED_DIR=/tmp/tftp +rm -rf "${EXTRACTED_DIR}" +if [ ! 
-f "${HOME}/Downloads/${ASSET}" ]; then + curl -o ${HOME}/Downloads/${ASSET} -L https://github.com/pftf/RPi4/releases/download/${VERSION}/${ASSET} +fi +mkdir -p "${EXTRACTED_DIR}" +unzip -o "${HOME}/Downloads/${ASSET}" -d "${EXTRACTED_DIR}" + +for _SERIAL in servers/*; do + SERIAL="${_SERIAL/servers\//}" + echo "${SERIAL}:" + mkdir -p "${EXTRACTED_DIR}/${SERIAL}" + cp -f "${_SERIAL}/RPI_EFI.fd" "${EXTRACTED_DIR}/${SERIAL}/" + cp -a tftp-root/SERIAL/* /tmp/tftp/${SERIAL}/ +done +ls -alhR /tmp/tftp +#+end_src + +*** Copy TFTP contents to dnsmasq + +Copy TFTP folder from sidero-controller-manager into dnsmasq's TFTP folder +#+begin_src shell +# echo "Copying TFTP out from Sidero Controller Manager" +# SCM_TFTP_FOLDER=/tmp +# kubectl -n sidero-system cp sidero-debug:/var/lib/sidero/tftp "${SCM_TFTP_FOLDER}/tftp" +# echo "Local contents of ${SCM_TFTP_FOLDER}/tftp" +# ls -alh "${SCM_TFTP_FOLDER}/tftp/" + +echo "Copying local contents to dnsmasq" +DNSMASQ_POD_NAME="$(kubectl -n dnsmasq get pods -l app=dnsmasq -o=jsonpath='{.items[0].metadata.name}')" +kubectl -n dnsmasq exec -it "${DNSMASQ_POD_NAME}" -- rm -rf /var/lib/sidero/tftp/* +kubectl -n dnsmasq cp "/tmp/tftp" "${DNSMASQ_POD_NAME}":/var/lib/sidero/ +kubectl -n dnsmasq exec -it "${DNSMASQ_POD_NAME}" -- ls -alh /var/lib/sidero/tftp +#+end_src + +#+RESULTS: +#+begin_example +Copying local contents to dnsmasq +Unable to use a TTY - input is not a terminal or the right kind of file +Unable to use a TTY - input is not a terminal or the right kind of file +total 2M +drwxrwxrwx 17 root root 29 Jul 9 04:26 . +drwxr-xr-x 3 root root 3 Jul 8 20:50 .. 
+drwxr-xr-x 2 root root 11 Jul 9 04:26 136c6fe1 +drwxr-xr-x 2 root root 11 Jul 9 04:26 1f8570e2 +drwxr-xr-x 2 root root 11 Jul 9 04:26 2bbd241a +drwxr-xr-x 2 root root 11 Jul 9 04:26 2cb186c5 +drwxr-xr-x 2 root root 11 Jul 9 04:26 407d7434 +drwxr-xr-x 2 root root 11 Jul 9 04:26 4b1fcf44 +-rw-r--r-- 1 1000 1000 1.9M Jul 9 04:26 RPI_EFI.fd +-rw-r--r-- 1 1000 1000 5.3K Jul 9 04:26 Readme.md +drwxr-xr-x 2 root root 11 Jul 9 04:26 bc3ebf28 +drwxr-xr-x 2 root root 11 Jul 9 04:26 bc3ef28 +-rw-r--r-- 1 1000 1000 48.1K Jul 9 04:26 bcm2711-rpi-4-b.dtb +-rw-r--r-- 1 1000 1000 48.1K Jul 9 04:26 bcm2711-rpi-400.dtb +-rw-r--r-- 1 1000 1000 48.7K Jul 9 04:26 bcm2711-rpi-cm4.dtb +drwxr-xr-x 2 root root 11 Jul 9 04:26 bf267951 +drwxr-xr-x 2 root root 11 Jul 9 04:26 c3052218 +-rw-r--r-- 1 1000 1000 206 Jul 9 04:26 config.txt +drwxr-xr-x 2 root root 11 Jul 9 04:26 d997b14e +drwxr-xr-x 2 root root 11 Jul 9 04:26 dd24784d +drwxr-xr-x 2 root root 11 Jul 9 04:26 ebc28a3f +drwxr-xr-x 3 root root 5 Jul 9 04:26 firmware +-rw-r--r-- 1 1000 1000 5.3K Jul 9 04:26 fixup4.dat +-rw-rw-r-- 1 1000 1000 968.6K Jul 8 23:19 ipxe-arm64.efi +-rw-rw-r-- 1 1000 1000 996.5K Jul 8 23:19 ipxe.efi +drwxr-xr-x 2 root root 3 Jul 9 04:26 overlays +-rw-r--r-- 1 1000 1000 2.1M Jul 9 04:26 start4.elf +-rw-rw-r-- 1 1000 1000 81.0K Jul 8 23:19 undionly.kpxe +-rw-rw-r-- 1 1000 1000 81.0K Jul 8 23:19 undionly.kpxe.0 +#+end_example + +* Preparing RPis for network booting +The following steps must be performed on each RPi + +** Flash the latest network EEPROM firmware +1. Fetch the latest release of EEPROM (network) from the [[https://github.com/raspberrypi/rpi-eeprom/releases][GitHub Repo]] +2. Write the contents of the zip file to a fat32 formatted microSD card +3. Insert and boot the microSD card on the RPi +4. Wait until the green screen before unplugging the power for the RPi + +** Bring up and configure the UEFI firmware +Into the non-networked target RPi, have spare keyboard and display plugged in, +1. 
Fetch a release of RPI4_UEFI firmware (currently using /v1.28/) from the [[https://github.com/pftf/RPi4/releases][GitHub Repo]] +2. Write the contents of the zip file to a fat32 formatted microSD card +3. Insert and boot the microSD card +4. Enter the UEFI set up by hitting /Esc/ + +*** Configure the UEFI firmware +1. Remove Memory limit: In /Device Manager -> Raspberry Pi Configuration -> Advanced Configuration/, set /Limit RAM to 3 GB/ to /Disabled/; F10 + Y to save. +2. Max out CPU clock: In /Device Manager -> Raspberry Pi Configuration -> CPU Configuration/, set /CPU clock/ to /Max/; F10 + Y to save. +3. Declare the iPXE HTTP boot URI: In /Device Manager -> Network Device List -> -> HTTP Boot Configuration/, set /Input the description/ to /boot.ii.nz/ and /Boot URI/ to http://boot.ii.nz/tftp/ipxe-arm64.efi; F10 + Y to save. +4. Tidy up the boot order: In /Boot Maintenance Manager -> Boot Options -> Delete Boot Option/, ensure that the following options remain (in no specific order): + - /boot.ii.nz/ + - /SD/MMC .../ + /Commit Changes and Exit/ to save. +4. Restructure the boot order: In /Boot Maintenance Manager -> Boot Options -> Change Boot Order/, set the order to: + - /boot.ii.nz/ + - /SD/MMC .../ + /Commit Changes and Exit/ to save. + +Once complete, /Esc/ the entire way out to the main menu and hit reset. When the RPi starts booting again, unplug from power before it reaches a source to boot from. + +Now, on the SD card with the UEFI firmware, it the file /RPI_EFI.fd/ must be copied into the [[./servers][./servers]] folder, by the board serial number. +Is it useful to find the serial number when the RPi is booted with no network or SD card (located on /board: <...> .../). 
+ +* Bringing up servers with Sidero +Declare some common configuration +#+begin_src yaml :tangle ./sidero/local-cluster-rpi-template.yaml +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.244.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: MetalCluster + name: ${CLUSTER_NAME} + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: ${CLUSTER_NAME}-cp +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalCluster +metadata: + name: ${CLUSTER_NAME} +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT} + port: ${CONTROL_PLANE_PORT} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalMachineTemplate +metadata: + name: ${CLUSTER_NAME}-cp +spec: + template: + spec: + serverClassRef: + apiVersion: metal.sidero.dev/v1alpha1 + kind: ServerClass + name: ${CONTROL_PLANE_SERVERCLASS} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +kind: TalosControlPlane +metadata: + name: ${CLUSTER_NAME}-cp +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + kind: MetalMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + name: ${CLUSTER_NAME}-cp + controlPlaneConfig: + init: + generateType: init + talosVersion: ${TALOS_VERSION} + configPatches: + - op: add + path: /machine/network + value: + interfaces: + - interface: eth0 + dhcp: true + vip: + ip: ${CONTROL_PLANE_ENDPOINT} + controlplane: + generateType: controlplane + talosVersion: ${TALOS_VERSION} + configPatches: + - op: add + path: /machine/network + value: + interfaces: + - interface: eth0 + dhcp: true + vip: + ip: ${CONTROL_PLANE_ENDPOINT} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: ${CLUSTER_NAME}-workers +spec: + 
template: + spec: + generateType: join + talosVersion: ${TALOS_VERSION} +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-workers +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: null + template: + spec: + version: ${KUBERNETES_VERSION} + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + name: ${CLUSTER_NAME}-workers + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: MetalMachineTemplate + name: ${CLUSTER_NAME}-workers +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalMachineTemplate +metadata: + name: ${CLUSTER_NAME}-workers +spec: + template: + spec: + serverClassRef: + apiVersion: metal.sidero.dev/v1alpha1 + kind: ServerClass + name: ${WORKER_SERVERCLASS} +#+end_src + +Declare the Environment for the RPis +#+begin_src yaml :tangle ./sidero/rpi-environment.yaml +apiVersion: metal.sidero.dev/v1alpha1 +kind: Environment +metadata: + name: raspberrypi4-servers +spec: + initrd: + url: https://github.com/talos-systems/talos/releases/download/v0.11.0/initramfs-arm64.xz + kernel: + args: + - console=tty0 + - console=ttyS0 + - consoleblank=0 + - earlyprintk=ttyS0 + - ima_appraise=fix + - ima_hash=sha512 + - ima_template=ima-ng + - init_on_alloc=1 + - initrd=initramfs.xz + - nvme_core.io_timeout=4294967295 + - printk.devkmsg=on + - pti=on + - random.trust_cpu=on + - slab_nomerge= + - talos.config=http://192.168.1.21:8081/configdata?uuid=${uuid} + - talos.platform=metal + url: https://github.com/talos-systems/talos/releases/download/v0.11.0/vmlinuz-arm64 +#+end_src + +Declare the ServerClass to use for RPis +#+begin_src yaml :tangle ./sidero/rpi-serverclass.yaml +apiVersion: metal.sidero.dev/v1alpha1 +kind: ServerClass +metadata: + name: raspberrypi4-servers +spec: + environmentRef: + name: raspberrypi4-servers + configPatches: + # 
- op: add + # path: /cluster/allowSchedulingOnMasters + # value: true + - op: replace + path: /machine/install + value: + disk: /dev/mmcblk1 + image: ghcr.io/talos-systems/installer:v0.11.0 + bootloader: true + wipe: false + force: false + qualifiers: + cpu: + - manufacturer: Broadcom + version: "BCM2711 (ARM Cortex-A72)" + systemInformation: + - manufacturer: "Raspberry Pi Foundation" + productName: "Raspberry Pi 4 Model B" +#+end_src + +Apply the ServerClass and Environment +#+begin_src shell :results silent +kubectl apply \ + -f ./sidero/rpi-serverclass.yaml \ + -f ./sidero/rpi-environment.yaml +#+end_src + +Create a namespace for the clusters +#+begin_src shell :results silent +kubectl create ns local-clusters +#+end_src + +Generate config +#+begin_src shell :results silent +export \ + CONTROL_PLANE_ENDPOINT=192.168.1.31 \ + CONTROL_PLANE_PORT=6443 \ + CONTROL_PLANE_SERVERCLASS=raspberrypi4-servers \ + KUBERNETES_VERSION=v1.21.2 \ + TALOS_VERSION=v0.11.0 \ + WORKER_SERVERCLASS=raspberrypi4-servers \ + WORKER_MACHINE_COUNT=5 \ + CONTROL_PLANE_MACHINE_COUNT=1 +clusterctl config cluster -n local-clusters local-cluster-mgmt --from ./sidero/local-cluster-rpi-template.yaml > ./sidero/local-clusters/local-cluster-mgmt.yaml +#+end_src + +Bring up the workload cluster +#+begin_src shell +kubectl apply -f ./sidero/local-clusters/local-cluster-mgmt.yaml +#+end_src + +#+RESULTS: +#+begin_example +cluster.cluster.x-k8s.io/local-cluster-mgmt created +metalcluster.infrastructure.cluster.x-k8s.io/local-cluster-mgmt created +metalmachinetemplate.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-cp created +taloscontrolplane.controlplane.cluster.x-k8s.io/local-cluster-mgmt-cp created +talosconfigtemplate.bootstrap.cluster.x-k8s.io/local-cluster-mgmt-workers created +machinedeployment.cluster.x-k8s.io/local-cluster-mgmt-workers created +metalmachinetemplate.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-workers created +#+end_example + +*** Debug +See all things CAPI and 
Sidero +#+begin_src shell +kubectl get $(kubectl api-resources | grep -E 'x-k8s|sidero' | awk '{print $1}' | xargs | tr ' ' ',') -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME AGE +local-clusters talosconfig.bootstrap.cluster.x-k8s.io/local-cluster-mgmt-cp-pxtxg 94m +local-clusters talosconfig.bootstrap.cluster.x-k8s.io/local-cluster-mgmt-workers-hk9q5 94m +local-clusters talosconfig.bootstrap.cluster.x-k8s.io/local-cluster-mgmt-workers-hlttg 94m +local-clusters talosconfig.bootstrap.cluster.x-k8s.io/local-cluster-mgmt-workers-m7snc 14m +local-clusters talosconfig.bootstrap.cluster.x-k8s.io/local-cluster-mgmt-workers-nd65c 94m +local-clusters talosconfig.bootstrap.cluster.x-k8s.io/local-cluster-mgmt-workers-qmsxw 94m + +NAMESPACE NAME AGE +local-clusters talosconfigtemplate.bootstrap.cluster.x-k8s.io/local-cluster-mgmt-workers 94m + +NAMESPACE NAME PHASE +local-clusters cluster.cluster.x-k8s.io/local-cluster-mgmt Provisioned + +NAMESPACE NAME PHASE REPLICAS READY UPDATED UNAVAILABLE +local-clusters machinedeployment.cluster.x-k8s.io/local-cluster-mgmt-workers Running 5 5 5 + +NAMESPACE NAME PROVIDERID PHASE VERSION +local-clusters machine.cluster.x-k8s.io/local-cluster-mgmt-cp-l5bnd sidero://00c03111-0000-0000-0000-dca63203f4f8 Running v1.21.2 +local-clusters machine.cluster.x-k8s.io/local-cluster-mgmt-workers-647744d6cd-2hdrs sidero://00c03111-0000-0000-0000-dca632487ab4 Running v1.21.2 +local-clusters machine.cluster.x-k8s.io/local-cluster-mgmt-workers-647744d6cd-67kxr sidero://00c03112-0000-0000-0000-dca6327dcbba Running v1.21.2 +local-clusters machine.cluster.x-k8s.io/local-cluster-mgmt-workers-647744d6cd-p6zrd sidero://00c03111-0000-0000-0000-dca63203f59a Running v1.21.2 +local-clusters machine.cluster.x-k8s.io/local-cluster-mgmt-workers-647744d6cd-pljkq sidero://00c03111-0000-0000-0000-dca6321c2b8a Running v1.21.2 +local-clusters machine.cluster.x-k8s.io/local-cluster-mgmt-workers-647744d6cd-wn7bv sidero://00c03111-0000-0000-0000-dca6321c36d1 
Running v1.21.2 + +NAMESPACE NAME REPLICAS AVAILABLE READY +local-clusters machineset.cluster.x-k8s.io/local-cluster-mgmt-workers-647744d6cd 5 5 5 + +NAMESPACE NAME TYPE PROVIDER VERSION WATCH NAMESPACE +cabpt-system provider.clusterctl.cluster.x-k8s.io/bootstrap-talos BootstrapProvider talos v0.2.0 +cacppt-system provider.clusterctl.cluster.x-k8s.io/control-plane-talos ControlPlaneProvider talos v0.1.0 +capi-system provider.clusterctl.cluster.x-k8s.io/cluster-api CoreProvider cluster-api v0.3.20 +sidero-system provider.clusterctl.cluster.x-k8s.io/infrastructure-sidero InfrastructureProvider sidero v0.3.0 + +NAMESPACE NAME READY INITIALIZED REPLICAS READY REPLICAS UNAVAILABLE REPLICAS +local-clusters taloscontrolplane.controlplane.cluster.x-k8s.io/local-cluster-mgmt-cp true true 1 1 + +NAMESPACE NAME CLUSTER READY +local-clusters metalcluster.infrastructure.cluster.x-k8s.io/local-cluster-mgmt local-cluster-mgmt true + +NAMESPACE NAME READY +local-clusters metalmachine.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-cp-sdpgq true +local-clusters metalmachine.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-workers-7lg59 true +local-clusters metalmachine.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-workers-jwnz9 true +local-clusters metalmachine.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-workers-lt82n true +local-clusters metalmachine.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-workers-nx7pg true +local-clusters metalmachine.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-workers-qmnhp true + +NAMESPACE NAME AGE +local-clusters metalmachinetemplate.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-cp 94m +local-clusters metalmachinetemplate.infrastructure.cluster.x-k8s.io/local-cluster-mgmt-workers 94m + +NAMESPACE NAME READY + serverbinding.infrastructure.cluster.x-k8s.io/00c03111-0000-0000-0000-dca63203f4f8 true + serverbinding.infrastructure.cluster.x-k8s.io/00c03111-0000-0000-0000-dca63203f59a true + 
serverbinding.infrastructure.cluster.x-k8s.io/00c03111-0000-0000-0000-dca6321c2b8a true + serverbinding.infrastructure.cluster.x-k8s.io/00c03111-0000-0000-0000-dca6321c36d1 true + serverbinding.infrastructure.cluster.x-k8s.io/00c03111-0000-0000-0000-dca632487ab4 true + serverbinding.infrastructure.cluster.x-k8s.io/00c03112-0000-0000-0000-dca6327dcbba true + +NAMESPACE NAME KERNEL INITRD READY + environment.metal.sidero.dev/default https://github.com/talos-systems/talos/releases/download/v0.10.3/vmlinuz-amd64 https://github.com/talos-systems/talos/releases/download/v0.10.3/initramfs-amd64.xz True + environment.metal.sidero.dev/raspberrypi4-servers https://github.com/talos-systems/talos/releases/download/v0.11.0/vmlinuz-arm64 https://github.com/talos-systems/talos/releases/download/v0.11.0/initramfs-arm64.xz True + +NAMESPACE NAME AVAILABLE IN USE + serverclass.metal.sidero.dev/any [] ["00c03111-0000-0000-0000-dca63203f4f8","00c03111-0000-0000-0000-dca63203f59a","00c03111-0000-0000-0000-dca6321c2b8a","00c03111-0000-0000-0000-dca6321c36d1","00c03111-0000-0000-0000-dca632487ab4","00c03112-0000-0000-0000-dca6327dcbba"] + serverclass.metal.sidero.dev/raspberrypi4-servers [] ["00c03111-0000-0000-0000-dca63203f4f8","00c03111-0000-0000-0000-dca63203f59a","00c03111-0000-0000-0000-dca6321c2b8a","00c03111-0000-0000-0000-dca6321c36d1","00c03111-0000-0000-0000-dca632487ab4","00c03112-0000-0000-0000-dca6327dcbba"] + +NAMESPACE NAME HOSTNAME ACCEPTED ALLOCATED CLEAN POWER + server.metal.sidero.dev/00c03111-0000-0000-0000-dca63203f4f8 Pi0 true true false on + server.metal.sidero.dev/00c03111-0000-0000-0000-dca63203f59a Pi7 true true false on + server.metal.sidero.dev/00c03111-0000-0000-0000-dca6321c2b8a Pi4 true true false on + server.metal.sidero.dev/00c03111-0000-0000-0000-dca6321c36d1 Pi2 true true false on + server.metal.sidero.dev/00c03111-0000-0000-0000-dca632487ab4 Pi5 true true false on + server.metal.sidero.dev/00c03112-0000-0000-0000-dca6327dcbba Pi1 true true false on 
+#+end_example + +*** Deleting the cluster +Delete the servers managed by CAPI +#+begin_src shell +CLUSTER_NAME=local-cluster-mgmt +NAMESPACE=local-clusters +SERVERS="$(kubectl -n ${NAMESPACE} get metalmachines -l cluster.x-k8s.io/cluster-name=${CLUSTER_NAME} -o=jsonpath='{range .items[*]}server/{.spec.serverRef.name} {end}')" +MACHINES="$(kubectl -n ${NAMESPACE} get machines -l cluster.x-k8s.io/cluster-name=${CLUSTER_NAME} -o=jsonpath='{range .items[*]}machine/{.metadata.name} {end}')" +METALMACHINES="$(kubectl -n ${NAMESPACE} get metalmachines -l cluster.x-k8s.io/cluster-name=${CLUSTER_NAME} -o=jsonpath='{range .items[*]}metalmachine/{.metadata.name} {end}')" +kubectl -n "${NAMESPACE}" delete ${METALMACHINES} ${SERVERS} ${MACHINES} +#+end_src + +#+RESULTS: +#+begin_example +metalmachine.infrastructure.cluster.x-k8s.io "local-cluster-mgmt-cp-4tfx9" deleted +metalmachine.infrastructure.cluster.x-k8s.io "local-cluster-mgmt-workers-bmj8c" deleted +metalmachine.infrastructure.cluster.x-k8s.io "local-cluster-mgmt-workers-hp7jd" deleted +metalmachine.infrastructure.cluster.x-k8s.io "local-cluster-mgmt-workers-hxg7f" deleted +metalmachine.infrastructure.cluster.x-k8s.io "local-cluster-mgmt-workers-vdglk" deleted +metalmachine.infrastructure.cluster.x-k8s.io "local-cluster-mgmt-workers-xz9cc" deleted +machine.cluster.x-k8s.io "local-cluster-mgmt-cp-k6rvc" deleted +machine.cluster.x-k8s.io "local-cluster-mgmt-workers-6f4b4cbf84-7vhfs" deleted +machine.cluster.x-k8s.io "local-cluster-mgmt-workers-6f4b4cbf84-fg7pf" deleted +machine.cluster.x-k8s.io "local-cluster-mgmt-workers-6f4b4cbf84-gtcc5" deleted +machine.cluster.x-k8s.io "local-cluster-mgmt-workers-6f4b4cbf84-px7bz" deleted +machine.cluster.x-k8s.io "local-cluster-mgmt-workers-6f4b4cbf84-tjct4" deleted +#+end_example + +(optional) Remove servers with no status +#+begin_src shell +kubectl delete server $(kubectl -n local-clusters get server -o=json | jq -r '.items[] | select(.status==null) | .metadata.name') +#+end_src + 
+#+RESULTS: +#+begin_example +kubectl delete server +#+end_example + +Remove the cluster +#+begin_src shell +kubectl delete -f ./sidero/local-clusters/local-cluster-mgmt.yaml +#+end_src + +#+RESULTS: +#+begin_example +Error from server (NotFound): error when deleting "./sidero/local-clusters/local-cluster-mgmt.yaml": clusters.cluster.x-k8s.io "local-cluster-mgmt" not found +Error from server (NotFound): error when deleting "./sidero/local-clusters/local-cluster-mgmt.yaml": metalclusters.infrastructure.cluster.x-k8s.io "local-cluster-mgmt" not found +Error from server (NotFound): error when deleting "./sidero/local-clusters/local-cluster-mgmt.yaml": metalmachinetemplates.infrastructure.cluster.x-k8s.io "local-cluster-mgmt-cp" not found +Error from server (NotFound): error when deleting "./sidero/local-clusters/local-cluster-mgmt.yaml": taloscontrolplanes.controlplane.cluster.x-k8s.io "local-cluster-mgmt-cp" not found +Error from server (NotFound): error when deleting "./sidero/local-clusters/local-cluster-mgmt.yaml": talosconfigtemplates.bootstrap.cluster.x-k8s.io "local-cluster-mgmt-workers" not found +Error from server (NotFound): error when deleting "./sidero/local-clusters/local-cluster-mgmt.yaml": machinedeployments.cluster.x-k8s.io "local-cluster-mgmt-workers" not found +Error from server (NotFound): error when deleting "./sidero/local-clusters/local-cluster-mgmt.yaml": metalmachinetemplates.infrastructure.cluster.x-k8s.io "local-cluster-mgmt-workers" not found +#+end_example + +** Get TalosConfig +#+begin_src tmate :window mgm-cluster +export TALOSCONFIG=/tmp/local-cluster-mgmt-talosconfig +kubectl -n local-clusters get talosconfig -l cluster.x-k8s.io/cluster-name=local-cluster-mgmt -o=jsonpath='{.items[0].status.talosConfig}' > "${TALOSCONFIG}" +#+end_src + +** Get KubeConfig +#+begin_src tmate :window mgm-cluster +export KUBECONFIG=/tmp/local-cluster-mgmt-kubeconfig +talosctl --talosconfig "${TALOSCONFIG}" -e 192.168.1.31 -n 192.168.1.31 kubeconfig 
"${KUBECONFIG}" +#+end_src + +** Check health of Nodes +#+begin_src shell :wrap "SRC text" +export KUBECONFIG=/tmp/local-cluster-mgmt-kubeconfig +kubectl get nodes +#+end_src + +#+RESULTS: +#+begin_SRC text +NAME STATUS ROLES AGE VERSION +pi0 Ready 82m v1.21.2 +pi1 Ready 82m v1.21.2 +pi2 Ready 82m v1.21.2 +pi4 Ready 5m12s v1.21.2 +pi5 Ready 5m36s v1.21.2 +pi7 Ready 82m v1.21.2 +#+end_SRC + +** Fetch a list of all of the Pods +#+begin_src shell :wrap "SRC text" +export KUBECONFIG=/tmp/local-cluster-mgmt-kubeconfig +kubectl get pods -A +#+end_src + +#+RESULTS: +#+begin_SRC text +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system coredns-6ff77786fb-cdr8s 1/1 Running 5 83m +kube-system coredns-6ff77786fb-jx4t2 1/1 Running 5 83m +kube-system kube-apiserver-pi0 1/1 Running 4 81m +kube-system kube-controller-manager-pi0 1/1 Running 11 82m +kube-system kube-flannel-c4l95 1/1 Running 5 82m +kube-system kube-flannel-hrflr 1/1 Running 10 82m +kube-system kube-flannel-mqvq2 1/1 Running 10 82m +kube-system kube-flannel-n47hn 1/1 Running 0 5m45s +kube-system kube-flannel-x6b65 1/1 Running 4 82m +kube-system kube-flannel-zqz22 1/1 Running 0 6m9s +kube-system kube-proxy-br6pp 1/1 Running 5 82m +kube-system kube-proxy-fdmlg 1/1 Running 0 6m9s +kube-system kube-proxy-jxqmm 1/1 Running 5 82m +kube-system kube-proxy-khmbb 1/1 Running 4 82m +kube-system kube-proxy-mrhnb 1/1 Running 0 5m45s +kube-system kube-proxy-nlz8k 1/1 Running 5 82m +kube-system kube-scheduler-pi0 1/1 Running 11 81m +#+end_SRC diff --git a/ii/local-cluster/cert-manager/cert-manager.yaml b/ii/local-cluster/cert-manager/cert-manager.yaml new file mode 100644 index 0000000..d539e11 --- /dev/null +++ b/ii/local-cluster/cert-manager/cert-manager.yaml @@ -0,0 +1,17188 @@ +# Copyright The cert-manager Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: cert-manager/cert-manager-webhook-ca + labels: + app: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: certificaterequests.cert-manager.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: cert-manager.io + names: + categories: + - cert-manager + kind: CertificateRequest + listKind: CertificateRequestList + plural: certificaterequests + shortNames: + - cr + - crs + singular: certificaterequest + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. 
Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the CertificateRequest resource. + properties: + csr: + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. + format: byte + type: string + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. + type: string + extra: + additionalProperties: + items: + type: string + type: array + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. 
+ type: object + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + items: + type: string + type: array + x-kubernetes-list-type: atomic + isCA: + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + items: + description: 'KeyUsage specifies valid usage contexts for keys. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + required: + - csr + - issuerRef + type: object + status: + description: Status of the CertificateRequest. This is set and managed automatically. + properties: + ca: + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + format: byte + type: string + certificate: + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. + format: byte + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. 
Known condition types are `Ready` and `InvalidRequest`. + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + required: + - status + - type + type: object + type: array + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. + format: date-time + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. 
It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the CertificateRequest resource. + properties: + csr: + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. + format: byte + type: string + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. + type: string + extra: + additionalProperties: + items: + type: string + type: array + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. 
+ type: object + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + items: + type: string + type: array + x-kubernetes-list-type: atomic + isCA: + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + items: + description: 'KeyUsage specifies valid usage contexts for keys. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + required: + - csr + - issuerRef + type: object + status: + description: Status of the CertificateRequest. This is set and managed automatically. + properties: + ca: + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + format: byte + type: string + certificate: + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. + format: byte + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. 
Known condition types are `Ready` and `InvalidRequest`. + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + required: + - status + - type + type: object + type: array + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. + format: date-time + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. 
It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the CertificateRequest resource. + properties: + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. + type: string + extra: + additionalProperties: + items: + type: string + type: array + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: object + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + isCA: + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + request: + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. + format: byte + type: string + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + items: + description: 'KeyUsage specifies valid usage contexts for keys. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + required: + - issuerRef + - request + type: object + status: + description: Status of the CertificateRequest. This is set and managed automatically. + properties: + ca: + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + format: byte + type: string + certificate: + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. + format: byte + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. 
Known condition types are `Ready` and `InvalidRequest`. + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + required: + - status + - type + type: object + type: array + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. + format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. 
Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the CertificateRequest resource. + properties: + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. + type: string + extra: + additionalProperties: + items: + type: string + type: array + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: object + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + isCA: + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + request: + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. + format: byte + type: string + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified. + items: + description: 'KeyUsage specifies valid usage contexts for keys. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + required: + - issuerRef + - request + type: object + status: + description: Status of the CertificateRequest. This is set and managed automatically. + properties: + ca: + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + format: byte + type: string + certificate: + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. + format: byte + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. 
Known condition types are `Ready` and `InvalidRequest`. + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + required: + - status + - type + type: object + type: array + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. 
+ format: date-time + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: cert-manager/cert-manager-webhook-ca + labels: + app: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: certificates.cert-manager.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: cert-manager.io + names: + categories: + - cert-manager + kind: Certificate + listKind: CertificateList + plural: certificates + shortNames: + - cert + - certs + singular: certificate + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. 
\n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Certificate resource. + properties: + commonName: + description: 'CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' + type: string + dnsNames: + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. + items: + type: string + type: array + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + emailSANs: + description: EmailSANs is a list of email subjectAltNames to be set on the Certificate. 
+ items: + type: string + type: array + encodeUsagesInRequest: + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest + type: boolean + ipAddresses: + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. + items: + type: string + type: array + isCA: + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + keyAlgorithm: + description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `rsa` or `ecdsa` If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for `ecdsa` key algorithm and key size of 2048 will be used for `rsa` key algorithm. + enum: + - rsa + - ecdsa + type: string + keyEncoding: + description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. If provided, allowed values are `pkcs1` and `pkcs8` standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then `pkcs1` will be used by default. 
+ enum: + - pkcs1 + - pkcs8 + type: string + keySize: + description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `rsa`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ecdsa`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. + type: integer + keystores: + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - create + - passwordSecretRef + type: object + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. + properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. 
The keystore file will only be updated upon re-issuance. + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - create + - passwordSecretRef + type: object + type: object + organization: + description: Organization is a list of organizations to be used on the Certificate. + items: + type: string + type: array + privateKey: + description: Options to control private keys used for the Certificate. + properties: + rotationPolicy: + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. + type: string + type: object + renewBefore: + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. 
Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + revisionHistoryLimit: + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. + format: int32 + type: integer + secretName: + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. + type: string + subject: + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). + properties: + countries: + description: Countries to be used on the Certificate. + items: + type: string + type: array + localities: + description: Cities to be used on the Certificate. + items: + type: string + type: array + organizationalUnits: + description: Organizational Units to be used on the Certificate. + items: + type: string + type: array + postalCodes: + description: Postal codes to be used on the Certificate. + items: + type: string + type: array + provinces: + description: State/Provinces to be used on the Certificate. + items: + type: string + type: array + serialNumber: + description: Serial number to be used on the Certificate. + type: string + streetAddresses: + description: Street addresses to be used on the Certificate. + items: + type: string + type: array + type: object + uriSANs: + description: URISANs is a list of URI subjectAltNames to be set on the Certificate. 
+ items: + type: string + type: array + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + items: + description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + required: + - issuerRef + - secretName + type: object + status: + description: Status of the Certificate. This is set and managed automatically. + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + items: + description: CertificateCondition contains condition information for an Certificate. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. 
+ type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + required: + - status + - type + type: object + type: array + lastFailureTime: + description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. + format: date-time + type: string + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. + format: date-time + type: string + notBefore: + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. + format: date-time + type: string + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. 
+ format: date-time + type: string + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." + type: integer + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Certificate resource. + properties: + commonName: + description: 'CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' + type: string + dnsNames: + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. + items: + type: string + type: array + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + emailSANs: + description: EmailSANs is a list of email subjectAltNames to be set on the Certificate. 
+ items: + type: string + type: array + encodeUsagesInRequest: + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest + type: boolean + ipAddresses: + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. + items: + type: string + type: array + isCA: + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + keyAlgorithm: + description: KeyAlgorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `rsa` or `ecdsa` If `keyAlgorithm` is specified and `keySize` is not provided, key size of 256 will be used for `ecdsa` key algorithm and key size of 2048 will be used for `rsa` key algorithm. + enum: + - rsa + - ecdsa + type: string + keyEncoding: + description: KeyEncoding is the private key cryptography standards (PKCS) for this certificate's private key to be encoded in. If provided, allowed values are `pkcs1` and `pkcs8` standing for PKCS#1 and PKCS#8, respectively. If KeyEncoding is not specified, then `pkcs1` will be used by default. 
+ enum: + - pkcs1 + - pkcs8 + type: string + keySize: + description: KeySize is the key bit size of the corresponding private key for this certificate. If `keyAlgorithm` is set to `rsa`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `keyAlgorithm` is set to `ecdsa`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. + type: integer + keystores: + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority. + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - create + - passwordSecretRef + type: object + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. 
+ properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority. + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - create + - passwordSecretRef + type: object + type: object + privateKey: + description: Options to control private keys used for the Certificate. + properties: + rotationPolicy: + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. + type: string + type: object + renewBefore: + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. 
The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + revisionHistoryLimit: + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. + format: int32 + type: integer + secretName: + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. + type: string + subject: + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). + properties: + countries: + description: Countries to be used on the Certificate. + items: + type: string + type: array + localities: + description: Cities to be used on the Certificate. + items: + type: string + type: array + organizationalUnits: + description: Organizational Units to be used on the Certificate. + items: + type: string + type: array + organizations: + description: Organizations to be used on the Certificate. + items: + type: string + type: array + postalCodes: + description: Postal codes to be used on the Certificate. + items: + type: string + type: array + provinces: + description: State/Provinces to be used on the Certificate. + items: + type: string + type: array + serialNumber: + description: Serial number to be used on the Certificate. 
+ type: string + streetAddresses: + description: Street addresses to be used on the Certificate. + items: + type: string + type: array + type: object + uriSANs: + description: URISANs is a list of URI subjectAltNames to be set on the Certificate. + items: + type: string + type: array + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + items: + description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + required: + - issuerRef + - secretName + type: object + status: + description: Status of the Certificate. This is set and managed automatically. + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + items: + description: CertificateCondition contains condition information for an Certificate. 
+ properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + required: + - status + - type + type: object + type: array + lastFailureTime: + description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. + format: date-time + type: string + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. 
+ format: date-time + type: string + notBefore: + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. + format: date-time + type: string + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. + format: date-time + type: string + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." + type: integer + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. 
+ jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Certificate resource. + properties: + commonName: + description: 'CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' + type: string + dnsNames: + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. + items: + type: string + type: array + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. 
Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + emailSANs: + description: EmailSANs is a list of email subjectAltNames to be set on the Certificate. + items: + type: string + type: array + encodeUsagesInRequest: + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest + type: boolean + ipAddresses: + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. + items: + type: string + type: array + isCA: + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + keystores: + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. 
The keystore file will only be updated upon re-issuance. + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - create + - passwordSecretRef + type: object + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. + properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - create + - passwordSecretRef + type: object + type: object + privateKey: + description: Options to control private keys used for the Certificate. + properties: + algorithm: + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. 
If provided, allowed values are either `RSA` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. + enum: + - RSA + - ECDSA + type: string + encoding: + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. + enum: + - PKCS1 + - PKCS8 + type: string + rotationPolicy: + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. + type: string + size: + description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. + type: integer + type: object + renewBefore: + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. 
Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + revisionHistoryLimit: + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. + format: int32 + type: integer + secretName: + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. + type: string + subject: + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). + properties: + countries: + description: Countries to be used on the Certificate. + items: + type: string + type: array + localities: + description: Cities to be used on the Certificate. + items: + type: string + type: array + organizationalUnits: + description: Organizational Units to be used on the Certificate. + items: + type: string + type: array + organizations: + description: Organizations to be used on the Certificate. + items: + type: string + type: array + postalCodes: + description: Postal codes to be used on the Certificate. + items: + type: string + type: array + provinces: + description: State/Provinces to be used on the Certificate. + items: + type: string + type: array + serialNumber: + description: Serial number to be used on the Certificate. + type: string + streetAddresses: + description: Street addresses to be used on the Certificate. 
+ items: + type: string + type: array + type: object + uriSANs: + description: URISANs is a list of URI subjectAltNames to be set on the Certificate. + items: + type: string + type: array + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + items: + description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + required: + - issuerRef + - secretName + type: object + status: + description: Status of the Certificate. This is set and managed automatically. + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + items: + description: CertificateCondition contains condition information for an Certificate. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. 
+ format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + required: + - status + - type + type: object + type: array + lastFailureTime: + description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. + format: date-time + type: string + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. + format: date-time + type: string + notBefore: + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. 
+ format: date-time + type: string + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. + format: date-time + type: string + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." + type: integer + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. 
\n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Certificate resource. + properties: + commonName: + description: 'CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4' + type: string + dnsNames: + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. + items: + type: string + type: array + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + emailAddresses: + description: EmailAddresses is a list of email subjectAltNames to be set on the Certificate. 
+ items: + type: string + type: array + encodeUsagesInRequest: + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest + type: boolean + ipAddresses: + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. + items: + type: string + type: array + isCA: + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + keystores: + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. 
A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - create + - passwordSecretRef + type: object + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. + properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will only be updated upon re-issuance. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - create + - passwordSecretRef + type: object + type: object + privateKey: + description: Options to control private keys used for the Certificate. + properties: + algorithm: + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. + enum: + - RSA + - ECDSA + type: string + encoding: + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. + enum: + - PKCS1 + - PKCS8 + type: string + rotationPolicy: + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. + type: string + size: + description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. No other values are allowed. 
+ type: integer + type: object + renewBefore: + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + revisionHistoryLimit: + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. + format: int32 + type: integer + secretName: + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. + type: string + subject: + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). + properties: + countries: + description: Countries to be used on the Certificate. + items: + type: string + type: array + localities: + description: Cities to be used on the Certificate. + items: + type: string + type: array + organizationalUnits: + description: Organizational Units to be used on the Certificate. + items: + type: string + type: array + organizations: + description: Organizations to be used on the Certificate. + items: + type: string + type: array + postalCodes: + description: Postal codes to be used on the Certificate. + items: + type: string + type: array + provinces: + description: State/Provinces to be used on the Certificate. 
+ items: + type: string + type: array + serialNumber: + description: Serial number to be used on the Certificate. + type: string + streetAddresses: + description: Street addresses to be used on the Certificate. + items: + type: string + type: array + type: object + uris: + description: URIs is a list of URI subjectAltNames to be set on the Certificate. + items: + type: string + type: array + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + items: + description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + type: string + type: array + required: + - issuerRef + - secretName + type: object + status: + description: Status of the Certificate. This is set and managed automatically. + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + items: + description: CertificateCondition contains condition information for an Certificate. 
+ properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + required: + - status + - type + type: object + type: array + lastFailureTime: + description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. + format: date-time + type: string + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. 
+ format: date-time + type: string + notBefore: + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. + format: date-time + type: string + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. + format: date-time + type: string + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." 
+ type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: cert-manager/cert-manager-webhook-ca + labels: + app: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: challenges.acme.cert-manager.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: acme.cert-manager.io + names: + categories: + - cert-manager + - cert-manager-acme + kind: Challenge + listKind: ChallengeList + plural: challenges + singular: challenge + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + authzURL: + description: AuthzURL is the URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: DNSName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + key: + description: 'Key is the ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content.' + type: string + solver: + description: Solver contains the domain solving configuration that should be used to solve this challenge resource. 
+ properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmedns: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azuredns: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + clouddns: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. + type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. 
+ properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. 
+ type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. 
This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + token: + description: Token is the ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: Type is the type of ACME challenge this resource represents. One of "http-01" or "dns-01". + enum: + - http-01 + - dns-01 + type: string + url: + description: URL is the URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: Wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + required: + - authzURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + type: object + status: + properties: + presented: + description: Presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). 
+ type: boolean + processing: + description: Processing is used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Reason contains human readable information on why the Challenge is in the current state. + type: string + state: + description: State contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + type: object + required: + - metadata + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + authzURL: + description: AuthzURL is the URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: DNSName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + key: + description: 'Key is the ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content.' + type: string + solver: + description: Solver contains the domain solving configuration that should be used to solve this challenge resource. 
+ properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmedns: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azuredns: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + clouddns: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. + type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. 
+ properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. 
+ type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. 
This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + token: + description: Token is the ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: Type is the type of ACME challenge this resource represents. One of "http-01" or "dns-01". + enum: + - http-01 + - dns-01 + type: string + url: + description: URL is the URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: Wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + required: + - authzURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + type: object + status: + properties: + presented: + description: Presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). 
+ type: boolean + processing: + description: Processing is used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Reason contains human readable information on why the Challenge is in the current state. + type: string + state: + description: State contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + type: object + required: + - metadata + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + authorizationURL: + description: The URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + type: string + issuerRef: + description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + key: + description: 'The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `<private key JWK thumbprint>.<key from acme server for challenge>`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `<private key JWK thumbprint>.<key from acme server for challenge>` text that must be set as the TXT record content.' + type: string + solver: + description: Contains the domain solving configuration that should be used to solve this challenge resource. 
+ properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. + type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. 
+ properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. 
+ type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. 
This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + token: + description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01". + enum: + - HTTP-01 + - DNS-01 + type: string + url: + description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + required: + - authorizationURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + type: object + status: + properties: + presented: + description: presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). 
+ type: boolean + processing: + description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Contains human readable information on why the Challenge is in the current state. + type: string + state: + description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + authorizationURL: + description: The URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + type: string + issuerRef: + description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + key: + description: 'The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content.' + type: string + solver: + description: Contains the domain solving configuration that should be used to solve this challenge resource. 
+ properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. + type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. 
+ properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. 
+ type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. 
This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + token: + description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01". + enum: + - HTTP-01 + - DNS-01 + type: string + url: + description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + required: + - authorizationURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + type: object + status: + properties: + presented: + description: presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). 
+ type: boolean + processing: + description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Contains human readable information on why the Challenge is in the current state. + type: string + state: + description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: cert-manager/cert-manager-webhook-ca + labels: + app: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: clusterissuers.cert-manager.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: cert-manager.io + names: + categories: + - cert-manager + kind: ClusterIssuer + listKind: ClusterIssuerList + plural: clusterissuers + singular: clusterissuer + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - 
description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. 
+ type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + enum: + - HS256 + - HS384 + - HS512 + type: string + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - keyID + - keySecretRef + type: object + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + maxLength: 64 + type: string + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. 
insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + items: + description: Configures an issuer to solve challenges using the specified options. Only one of HTTP01 or DNS01 may be provided. + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmedns: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azuredns: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + clouddns: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. + type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. 
If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. 
For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. 
+ properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. 
+ properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + type: array + required: + - privateKeySecretRef + - server + type: object + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + items: + type: string + type: array + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + items: + type: string + type: array + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. 
+ type: string + required: + - secretName + type: object + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + items: + type: string + type: array + type: object + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - role + - secretRef + type: object + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + type: object + caBundle: + description: PEM encoded CA bundle used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. + format: byte + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + required: + - auth + - path + - server + type: object + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud. 
Defaults to "https://api.venafi.cloud/v1". + type: string + required: + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + required: + - credentialsRef + - url + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. 
+ properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + type: object + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + items: + description: IssuerCondition contains condition information for an Issuer. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`). 
+ type: string + required: + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. 
+ properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it, it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + enum: + - HS256 + - HS384 + - HS512 + type: string + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. 
+ type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - keyID + - keySecretRef + type: object + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + maxLength: 64 + type: string + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + items: + description: Configures an issuer to solve challenges using the specified options. Only one of HTTP01 or DNS01 may be provided. + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmedns: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azuredns: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + clouddns: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. + type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. 
+ enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do a lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. 
+ properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver pods. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + type: array + required: + - privateKeySecretRef + - server + type: object + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. 
This is used to build internal PKIs that are managed by cert-manager. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + items: + type: string + type: array + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + items: + type: string + type: array + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + required: + - secretName + type: object + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + items: + type: string + type: array + type: object + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. 
+ properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - role + - secretRef + type: object + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + type: object + caBundle: + description: PEM encoded CA bundle used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. + format: byte + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + required: + - auth + - path + - server + type: object + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. 
+ properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + required: + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
+ type: string + required: + - credentialsRef + - url + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + type: object + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + items: + description: IssuerCondition contains condition information for an Issuer. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`). + type: string + required: + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' 
+ enum: + - HS256 + - HS384 + - HS512 + type: string + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - keyID + - keySecretRef + type: object + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + maxLength: 64 + type: string + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + items: + description: Configures an issuer to solve challenges using the specified options. Only one of HTTP01 or DNS01 may be provided. + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. 
+ type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. 
+ type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. 
+ properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + type: array + required: + - privateKeySecretRef + - server + type: object + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. 
This is used to build internal PKIs that are managed by cert-manager. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + items: + type: string + type: array + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + items: + type: string + type: array + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + required: + - secretName + type: object + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + items: + type: string + type: array + type: object + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. 
+ properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - role + - secretRef + type: object + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + type: object + caBundle: + description: PEM encoded CA bundle used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. + format: byte + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + required: + - auth + - path + - server + type: object + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. 
+ properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + required: + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
+ type: string + required: + - credentialsRef + - url + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + type: object + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + items: + description: IssuerCondition contains condition information for an Issuer. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`). + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' 
+ enum: + - HS256 + - HS384 + - HS512 + type: string + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - keyID + - keySecretRef + type: object + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + maxLength: 64 + type: string + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + items: + description: Configures an issuer to solve challenges using the specified options. Only one of HTTP01 or DNS01 may be provided. + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. 
+ type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. 
+ type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. 
+ properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + type: array + required: + - privateKeySecretRef + - server + type: object + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. 
This is used to build internal PKIs that are managed by cert-manager. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + items: + type: string + type: array + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + items: + type: string + type: array + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + required: + - secretName + type: object + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + items: + type: string + type: array + type: object + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. 
+ properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - role + - secretRef + type: object + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + type: object + caBundle: + description: PEM encoded CA bundle used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. + format: byte + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + required: + - auth + - path + - server + type: object + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. 
+ properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + required: + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
+ type: string + required: + - credentialsRef + - url + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + type: object + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + items: + description: IssuerCondition contains condition information for an Issuer. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`). + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: cert-manager/cert-manager-webhook-ca + labels: + app: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: issuers.cert-manager.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: cert-manager.io + names: + categories: + - cert-manager + kind: Issuer + listKind: IssuerList + plural: issuers + singular: issuer + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. 
+ jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. 
+ type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + enum: + - HS256 + - HS384 + - HS512 + type: string + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - keyID + - keySecretRef + type: object + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. 
PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + maxLength: 64 + type: string + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. 
Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + items: + description: Configures an issuer to solve challenges using the specified options. Only one of HTTP01 or DNS01 may be provided. + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmedns: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azuredns: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + clouddns: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. + type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. 
+ properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. 
+ type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. 
This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + type: array + required: + - privateKeySecretRef + - server + type: object + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + items: + type: string + type: array + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + items: + type: string + type: array + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. 
+ type: string + required: + - secretName + type: object + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + items: + type: string + type: array + type: object + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - role + - secretRef + type: object + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + type: object + caBundle: + description: PEM encoded CA bundle used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. + format: byte + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + required: + - auth + - path + - server + type: object + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud. 
Defaults to "https://api.venafi.cloud/v1". + type: string + required: + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + required: + - credentialsRef + - url + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + description: Status of the Issuer. This is set and managed automatically. + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. 
+ properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + type: object + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + items: + description: IssuerCondition contains condition information for an Issuer. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`). 
+ type: string + required: + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. 
+ properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + enum: + - HS256 + - HS384 + - HS512 + type: string + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. 
The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - keyID + - keySecretRef + type: object + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + maxLength: 64 + type: string + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + items: + description: Configures an issuer to solve challenges using the specified options. Only one of HTTP01 or DNS01 may be provided. + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmedns: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azuredns: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + clouddns: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. + type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. 
+ enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. 
+ type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. 
+ properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + type: array + required: + - privateKeySecretRef + - server + type: object + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. 
This is used to build internal PKIs that are managed by cert-manager. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + items: + type: string + type: array + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + items: + type: string + type: array + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + required: + - secretName + type: object + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + items: + type: string + type: array + type: object + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. 
+ properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - role + - secretRef + type: object + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + type: object + caBundle: + description: PEM encoded CA bundle used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. + format: byte + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + required: + - auth + - path + - server + type: object + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. 
+ properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + required: + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
+ type: string + required: + - credentialsRef + - url + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + description: Status of the Issuer. This is set and managed automatically. + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + type: object + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + items: + description: IssuerCondition contains condition information for an Issuer. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`). + type: string + required: + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' 
+ enum: + - HS256 + - HS384 + - HS512 + type: string + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - keyID + - keySecretRef + type: object + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + maxLength: 64 + type: string + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + items: + description: Configures an issuer to solve challenges using the specified options. Only one of HTTP01 or DNS01 may be provided. + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. 
+ type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. 
+ type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. 
+ properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + type: array + required: + - privateKeySecretRef + - server + type: object + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. 
This is used to build internal PKIs that are managed by cert-manager. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + items: + type: string + type: array + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + items: + type: string + type: array + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + required: + - secretName + type: object + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + items: + type: string + type: array + type: object + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. 
+ properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - role + - secretRef + type: object + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + type: object + caBundle: + description: PEM encoded CA bundle used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. + format: byte + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + required: + - auth + - path + - server + type: object + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. 
+ properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + required: + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
+ type: string + required: + - credentialsRef + - url + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + description: Status of the Issuer. This is set and managed automatically. + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + type: object + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + items: + description: IssuerCondition contains condition information for an Issuer. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`). + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + properties: + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' 
+ enum: + - HS256 + - HS384 + - HS512 + type: string + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - keyID + - keySecretRef + type: object + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + maxLength: 64 + type: string + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have their TLS certificate validated (i.e. insecure connections will be allowed). Only enable this option in development environments. The cert-manager system installed roots will be used to verify connections to the ACME server if this is false. Defaults to false. + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + items: + description: Configures an issuer to solve challenges using the specified options. Only one of HTTP01 or DNS01 may be provided. + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + host: + type: string + required: + - accountSecretRef + - host + type: object + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + serviceConsumerDomain: + type: string + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + type: object + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + environment: + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + type: string + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + required: + - resourceGroupName + - subscriptionID + type: object + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. 
+ type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - project + type: object + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + email: + description: Email of the account, only required when using API key based authentication. 
+ type: string + type: object + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + enum: + - None + - Follow + type: string + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - tokenSecretRef + type: object + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. 
+ properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - nameserver + type: object + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - region + type: object + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + required: + - groupName + - solverName + type: object + type: object + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + properties: + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. 
+ properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + type: object + type: object + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + properties: + annotations: + additionalProperties: + type: string + description: Annotations that should be added to the create ACME HTTP01 solver pods. 
+ type: object + labels: + additionalProperties: + type: string + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + type: object + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + type: object + serviceType: + description: Optional service type for Kubernetes solver service + type: string + type: object + type: object + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + additionalProperties: + type: string + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + type: array + required: + - privateKeySecretRef + - server + type: object + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. 
This is used to build internal PKIs that are managed by cert-manager. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + items: + type: string + type: array + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + items: + type: string + type: array + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + required: + - secretName + type: object + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + items: + type: string + type: array + type: object + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. 
+ properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + required: + - role + - secretRef + type: object + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + type: object + caBundle: + description: PEM encoded CA bundle used to validate Vault server certificate. Only used if the Server URL is using HTTPS protocol. This parameter is ignored for plain HTTP protocol connection. If not set the system root certificates are used to validate the TLS connection. + format: byte + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + required: + - auth + - path + - server + type: object + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. 
+ properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + required: + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certificate to use to verify connections to the TPP instance. If specified, system roots will not be used and the issuing CA for the TPP instance must be verifiable using the provided root. If not specified, the connection will be verified using the cert-manager system root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - name + type: object + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' 
+ type: string + required: + - credentialsRef + - url + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + description: Status of the Issuer. This is set and managed automatically. + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + type: object + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + items: + description: IssuerCondition contains condition information for an Issuer. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + format: int64 + type: integer + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, known values are (`Ready`). + type: string + required: + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: cert-manager/cert-manager-webhook-ca + labels: + app: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: orders.acme.cert-manager.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /convert + conversionReviewVersions: + - v1 + - v1beta1 + group: acme.cert-manager.io + names: + categories: + - cert-manager + - cert-manager-acme + kind: Order + listKind: OrderList + plural: orders + singular: order + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. 
+ jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha2 + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR. + type: string + csr: + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. + format: byte + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + items: + type: string + type: array + duration: + description: Duration is the duration for the not after date for the requested certificate. This is set on order creation as per the ACME spec. + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. 
+ items: + type: string + type: array + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + required: + - csr + - issuerRef + type: object + status: + properties: + authorizations: + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. + items: + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order validate a DNS name on an ACME Order resource. + properties: + challenges: + description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. + items: + description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process. + properties: + token: + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. + type: string + type: + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. 
+ type: string + url: + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. + type: string + required: + - token + - type + - url + type: object + type: array + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + required: + - url + type: object + type: array + certificate: + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. + format: byte + type: string + failureTime: + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. + format: date-time + type: string + finalizeURL: + description: FinalizeURL of the Order. 
This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about why the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. + type: string + type: object + required: + - metadata + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha3 + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR. + type: string + csr: + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. + format: byte + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + items: + type: string + type: array + duration: + description: Duration is the duration for the not after date for the requested certificate. this is set on order creation as pe the ACME spec. + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + items: + type: string + type: array + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. 
+ type: string + required: + - name + type: object + required: + - csr + - issuerRef + type: object + status: + properties: + authorizations: + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. + items: + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order validate a DNS name on an ACME Order resource. + properties: + challenges: + description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. + items: + description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process. + properties: + token: + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. + type: string + type: + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. + type: string + url: + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. + type: string + required: + - token + - type + - url + type: object + type: array + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. 
If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + required: + - url + type: object + type: array + certificate: + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. + format: byte + type: string + failureTime: + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. + format: date-time + type: string + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL of the Order. 
This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. + type: string + type: object + required: + - metadata + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. 
This field must match the corresponding field on the DER encoded CSR. + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + items: + type: string + type: array + duration: + description: Duration is the duration for the not after date for the requested certificate. this is set on order creation as pe the ACME spec. + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + items: + type: string + type: array + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + request: + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. + format: byte + type: string + required: + - issuerRef + - request + type: object + status: + properties: + authorizations: + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. + items: + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order validate a DNS name on an ACME Order resource. 
+ properties: + challenges: + description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. + items: + description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process. + properties: + token: + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. + type: string + type: + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. + type: string + url: + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. + type: string + required: + - token + - type + - url + type: object + type: array + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. 
+ enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + required: + - url + type: object + type: array + certificate: + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. + format: byte + type: string + failureTime: + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. + format: date-time + type: string + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. 
+ type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR. + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. 
This field must match the corresponding field on the DER encoded CSR. + items: + type: string + type: array + duration: + description: Duration is the duration for the not after date for the requested certificate. this is set on order creation as pe the ACME spec. + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + items: + type: string + type: array + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + required: + - name + type: object + request: + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. + format: byte + type: string + required: + - issuerRef + - request + type: object + status: + properties: + authorizations: + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. + items: + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order validate a DNS name on an ACME Order resource. + properties: + challenges: + description: Challenges specifies the challenge types offered by the ACME server. 
One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. + items: + description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process. + properties: + token: + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. + type: string + type: + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. + type: string + url: + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. + type: string + required: + - token + - type + - url + type: object + type: array + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for a wildcard DNS name. 
If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + required: + - url + type: object + type: array + certificate: + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. + format: byte + type: string + failureTime: + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. + format: date-time + type: string + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. 
+ type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app: cainjector + app.kubernetes.io/component: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cainjector + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-cainjector + namespace: cert-manager +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager + namespace: cert-manager +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cainjector + app.kubernetes.io/component: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cainjector + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-cainjector +rules: + - apiGroups: + - cert-manager.io + resources: + - certificates + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - get + - create + - 
update + - patch + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + - mutatingwebhookconfigurations + verbs: + - get + - list + - watch + - update + - apiGroups: + - apiregistration.k8s.io + resources: + - apiservices + verbs: + - get + - list + - watch + - update + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - update + - apiGroups: + - auditregistration.k8s.io + resources: + - auditsinks + verbs: + - get + - list + - watch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-issuers +rules: + - apiGroups: + - cert-manager.io + resources: + - issuers + - issuers/status + verbs: + - update + - apiGroups: + - cert-manager.io + resources: + - issuers + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-clusterissuers +rules: + - apiGroups: + - cert-manager.io + resources: + - clusterissuers + - clusterissuers/status + verbs: + - update + - apiGroups: + - cert-manager.io + resources: + - clusterissuers + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - delete + - 
apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-certificates +rules: + - apiGroups: + - cert-manager.io + resources: + - certificates + - certificates/status + - certificaterequests + - certificaterequests/status + verbs: + - update + - apiGroups: + - cert-manager.io + resources: + - certificates + - certificaterequests + - clusterissuers + - issuers + verbs: + - get + - list + - watch + - apiGroups: + - cert-manager.io + resources: + - certificates/finalizers + - certificaterequests/finalizers + verbs: + - update + - apiGroups: + - acme.cert-manager.io + resources: + - orders + verbs: + - create + - delete + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-orders +rules: + - apiGroups: + - acme.cert-manager.io + resources: + - orders + - orders/status + verbs: + - update + - apiGroups: + - acme.cert-manager.io + resources: + - orders + - challenges + verbs: + - get + - list + - watch + - apiGroups: + - cert-manager.io + resources: + - clusterissuers + - issuers + verbs: + - get + - list + - watch + - apiGroups: + - acme.cert-manager.io + resources: + - challenges + verbs: + - create + - delete + - apiGroups: + - 
acme.cert-manager.io + resources: + - orders/finalizers + verbs: + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-challenges +rules: + - apiGroups: + - acme.cert-manager.io + resources: + - challenges + - challenges/status + verbs: + - update + - apiGroups: + - acme.cert-manager.io + resources: + - challenges + verbs: + - get + - list + - watch + - apiGroups: + - cert-manager.io + resources: + - issuers + - clusterissuers + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - pods + - services + verbs: + - get + - list + - watch + - create + - delete + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - create + - delete + - update + - apiGroups: + - route.openshift.io + resources: + - routes/custom-host + verbs: + - create + - apiGroups: + - acme.cert-manager.io + resources: + - challenges/finalizers + verbs: + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-ingress-shim +rules: + - apiGroups: + - cert-manager.io + 
resources: + - certificates + - certificaterequests + verbs: + - create + - update + - delete + - apiGroups: + - cert-manager.io + resources: + - certificates + - certificaterequests + - issuers + - clusterissuers + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/finalizers + verbs: + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: cert-manager-view +rules: + - apiGroups: + - cert-manager.io + resources: + - certificates + - certificaterequests + - issuers + verbs: + - get + - list + - watch + - apiGroups: + - acme.cert-manager.io + resources: + - challenges + - orders + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: cert-manager-edit +rules: + - apiGroups: + - cert-manager.io + resources: + - certificates + - certificaterequests + - issuers + verbs: + - create + - delete + - deletecollection + - patch + - update + - apiGroups: + - acme.cert-manager.io + resources: + - challenges + - orders + verbs: + - create + - 
delete + - deletecollection + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-approve:cert-manager-io +rules: + - apiGroups: + - cert-manager.io + resourceNames: + - issuers.cert-manager.io/* + - clusterissuers.cert-manager.io/* + resources: + - signers + verbs: + - approve +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-certificatesigningrequests +rules: + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch + - update + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - update + - apiGroups: + - certificates.k8s.io + resourceNames: + - issuers.cert-manager.io/* + - clusterissuers.cert-manager.io/* + resources: + - signers + verbs: + - sign + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook:subjectaccessreviews +rules: + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + 
labels: + app: cainjector + app.kubernetes.io/component: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cainjector + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-cainjector +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-issuers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-clusterissuers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-certificates +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
cert-manager-controller-certificates +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-orders +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-orders +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-challenges +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-ingress-shim +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: 
Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-approve:cert-manager-io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-approve:cert-manager-io +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-controller-certificatesigningrequests +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificatesigningrequests +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook:subjectaccessreviews +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-webhook:subjectaccessreviews +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: cainjector + app.kubernetes.io/component: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cainjector + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-cainjector:leaderelection + namespace: kube-system +rules: + - apiGroups: + - "" + resourceNames: + - cert-manager-cainjector-leader-election + - 
cert-manager-cainjector-leader-election-core + resources: + - configmaps + verbs: + - get + - update + - patch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - cert-manager-cainjector-leader-election + - cert-manager-cainjector-leader-election-core + resources: + - leases + verbs: + - get + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager:leaderelection + namespace: kube-system +rules: + - apiGroups: + - "" + resourceNames: + - cert-manager-controller + resources: + - configmaps + verbs: + - get + - update + - patch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - cert-manager-controller + resources: + - leases + verbs: + - get + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager +rules: + - apiGroups: + - "" + resourceNames: + - cert-manager-webhook-ca + resources: + - secrets + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: cainjector + app.kubernetes.io/component: cainjector + 
app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cainjector + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-cainjector:leaderelection + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager:leaderelection + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-webhook:dynamic-serving +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager + namespace: cert-manager +spec: + ports: + - port: 9402 + protocol: TCP + targetPort: 9402 + selector: + app.kubernetes.io/component: 
controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/name: cert-manager + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook + namespace: cert-manager +spec: + ports: + - name: https + port: 443 + targetPort: 10250 + selector: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/name: webhook + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: cainjector + app.kubernetes.io/component: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cainjector + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-cainjector + namespace: cert-manager +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/name: cainjector + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/component: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cainjector + helm.sh/chart: cert-manager-v1.4.0 + spec: + containers: + - args: + - --v=2 + - --leader-election-namespace=kube-system + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: quay.io/jetstack/cert-manager-cainjector:v1.4.0 + imagePullPolicy: IfNotPresent + name: cert-manager + resources: {} + securityContext: + runAsNonRoot: true + serviceAccountName: cert-manager-cainjector +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + 
app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager + namespace: cert-manager +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/name: cert-manager + template: + metadata: + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "9402" + prometheus.io/scrape: "true" + labels: + app: cert-manager + app.kubernetes.io/component: controller + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: cert-manager + helm.sh/chart: cert-manager-v1.4.0 + spec: + containers: + - args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace=kube-system + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: quay.io/jetstack/cert-manager-controller:v1.4.0 + imagePullPolicy: IfNotPresent + name: cert-manager + ports: + - containerPort: 9402 + protocol: TCP + resources: {} + securityContext: + runAsNonRoot: true + serviceAccountName: cert-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook + namespace: cert-manager +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/name: webhook + template: + metadata: + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + spec: + containers: + - args: + - --v=2 + - --secure-port=10250 + - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) + - 
--dynamic-serving-ca-secret-name=cert-manager-webhook-ca + - --dynamic-serving-dns-names=cert-manager-webhook,cert-manager-webhook.cert-manager,cert-manager-webhook.cert-manager.svc + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: quay.io/jetstack/cert-manager-webhook:v1.4.0 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: 6080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: cert-manager + ports: + - containerPort: 10250 + name: https + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + resources: {} + securityContext: + runAsNonRoot: true + serviceAccountName: cert-manager-webhook +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: cert-manager/cert-manager-webhook-ca + labels: + app: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook +webhooks: + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /mutate + failurePolicy: Fail + name: webhook.cert-manager.io + rules: + - apiGroups: + - cert-manager.io + - acme.cert-manager.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - '*/*' + sideEffects: None + timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: cert-manager/cert-manager-webhook-ca + labels: + app: webhook + app.kubernetes.io/component: webhook + 
app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: webhook + helm.sh/chart: cert-manager-v1.4.0 + name: cert-manager-webhook +webhooks: + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /validate + failurePolicy: Fail + name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: cert-manager.io/disable-validation + operator: NotIn + values: + - "true" + - key: name + operator: NotIn + values: + - cert-manager + rules: + - apiGroups: + - cert-manager.io + - acme.cert-manager.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - '*/*' + sideEffects: None + timeoutSeconds: 10 + diff --git a/ii/local-cluster/cool-files.yaml b/ii/local-cluster/cool-files.yaml new file mode 100644 index 0000000..3411543 --- /dev/null +++ b/ii/local-cluster/cool-files.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: cool-files-are-here-ii-nz +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: cool-files-are-here-ii-nz-letsencrypt-prod +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: letsencrypt@ii.coop + privateKeySecretRef: + name: cool-files-are-here-ii-nz-letsencrypt-prod + solvers: + - http01: + ingress: + class: nginx +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: cool-files-are-here-ii-nz-letsencrypt-prod + namespace: cool-files-are-here-ii-nz +spec: + secretName: cool-files-are-here-ii-nz-letsencrypt-prod + issuerRef: + name: cool-files-are-here-ii-nz-letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - cool-files-are-here.ii.nz +--- +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: cool-files-are-here-ii-nz + namespace: cool-files-are-here-ii-nz +spec: + releaseName: cool-files-are-here-ii-nz + chart: + git: https://gitlab.com/safesurfer/go-http-server + ref: 1.4.0 + 
path: deployments/go-http-server + values: + securityContext: + readOnlyRootFilesystem: false + runAsUser: 0 + runAsGroup: 0 + runAsNonRoot: false + service: + port: 80 + image: + repository: docker.io/nginx + tag: stable + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - ii-thinkpad-p70 + ingress: + enabled: true + hosts: + - host: cool-files-are-here.ii.nz + paths: + - / + # tls: + # - secretName: letsencrypt-prod + # hosts: + # - cool-files-are-here.ii.nz + extraVolumeMounts: + - name: humacs-home-ii + mountPath: /usr/share/nginx/html + extraVolumes: + - name: humacs-home-ii + hostPath: + path: /home/ii/Downloads/videos diff --git a/ii/local-cluster/dnsmasq/Dockerfile b/ii/local-cluster/dnsmasq/Dockerfile new file mode 100644 index 0000000..56a5511 --- /dev/null +++ b/ii/local-cluster/dnsmasq/Dockerfile @@ -0,0 +1,6 @@ +FROM alpine:3.12 AS final +RUN apk add --no-cache tcpdump curl dnsmasq-dnssec gettext bash +# TODO run as non-root +RUN mkdir -p /etc/default/ && \ + echo -e "ENABLED=1\nIGNORE_RESOLVCONF=yes" > /etc/default/dnsmasq +ENTRYPOINT ["dnsmasq","--no-daemon"] diff --git a/ii/local-cluster/dnsmasq/debug-pod.yaml b/ii/local-cluster/dnsmasq/debug-pod.yaml new file mode 100644 index 0000000..c97c7c8 --- /dev/null +++ b/ii/local-cluster/dnsmasq/debug-pod.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + run: dnsmasq-debug + name: dnsmasq-debug + namespace: dnsmasq +spec: + hostNetwork: true + containers: + - image: alpine:3.12 + name: dnsmasq-debug + securityContext: + privileged: true + command: + - sleep + - infinity + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - dnsmasq + topologyKey: "kubernetes.io/hostname" + dnsPolicy: ClusterFirst + restartPolicy: Always diff 
--git a/ii/local-cluster/dnsmasq/dnsmasq-pvc.yaml b/ii/local-cluster/dnsmasq/dnsmasq-pvc.yaml new file mode 100644 index 0000000..21f0e77 --- /dev/null +++ b/ii/local-cluster/dnsmasq/dnsmasq-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: dnsmasq-tftp + namespace: dnsmasq +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/ii/local-cluster/dnsmasq/dnsmasq.conf b/ii/local-cluster/dnsmasq/dnsmasq.conf new file mode 100644 index 0000000..5d0978f --- /dev/null +++ b/ii/local-cluster/dnsmasq/dnsmasq.conf @@ -0,0 +1,13 @@ +#dnsmasq config, for a complete example, see: +# http://oss.segetech.com/intra/srv/dnsmasq.conf + +port=0 +dhcp-range=${DHCP_RANGE},proxy +pxe-service=0,"Raspberry Pi Boot" +pxe-prompt="PXE booting Talos from Sidero in",0 +dhcp-boot=ipxe-arm64.efi,sidero +log-queries +log-dhcp + +enable-tftp=* +tftp-root=/var/lib/sidero/tftp diff --git a/ii/local-cluster/dnsmasq/dnsmasq.yaml b/ii/local-cluster/dnsmasq/dnsmasq.yaml new file mode 100644 index 0000000..24b41d2 --- /dev/null +++ b/ii/local-cluster/dnsmasq/dnsmasq.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dnsmasq + namespace: dnsmasq + labels: + nz.ii: dnsmasq + app: dnsmasq +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + nz.ii: dnsmasq + template: + metadata: + annotations: + nz.ii/dnsmasq.conf-sha256sum: "${DNSMASQ_CONF_HASH}" + nz.ii/dockerfile-sha256sum: "${DOCKERFILE_HASH}" + labels: + nz.ii: dnsmasq + app: dnsmasq + spec: + hostNetwork: true + containers: + - name: dnsmasq + image: registry.gitlab.com/ii/nz/dnsmasq:latest + imagePullPolicy: Always + volumeMounts: + - name: config + mountPath: /etc/dnsmasq + - name: tftp-folder + mountPath: /var/lib/sidero/tftp + env: + - name: DHCP_RANGE + value: "${DHCP_RANGE}" + command: + - bash + - -x + - -c + - dnsmasq --no-daemon -C <(envsubst < /etc/dnsmasq/dnsmasq.conf) + securityContext: + capabilities: + add: 
+ - NET_ADMIN + - NET_RAW + - SYS_ADMIN + ports: + - containerPort: 67 + hostPort: 67 + protocol: UDP + - containerPort: 4011 + hostPort: 4011 + protocol: UDP + - containerPort: 7472 + hostPort: 7472 + protocol: UDP + volumes: + - name: config + configMap: + name: dnsmasq-config + - name: tftp-folder + persistentVolumeClaim: + claimName: dnsmasq-tftp diff --git a/ii/local-cluster/helm-operator/helm-operator-crds.yaml b/ii/local-cluster/helm-operator/helm-operator-crds.yaml new file mode 100644 index 0000000..8b6c7c5 --- /dev/null +++ b/ii/local-cluster/helm-operator/helm-operator-crds.yaml @@ -0,0 +1,418 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: helmreleases.helm.fluxcd.io +spec: + additionalPrinterColumns: + - JSONPath: .status.releaseName + name: Release + type: string + description: ReleaseName is the name of the Helm release managed by the HelmRelease, + as given by Helm. + - JSONPath: .status.phase + name: Phase + type: string + description: Phase is the current release phase being performed for the HelmRelease. + - JSONPath: .status.releaseStatus + name: Status + type: string + description: ReleaseStatus is the status of the Helm release managed by the HelmRelease, + as given by Helm. + - JSONPath: .status.conditions[?(@.type=="Released")].message + name: Message + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. 
+ name: Age + type: date + group: helm.fluxcd.io + names: + kind: HelmRelease + listKind: HelmReleaseList + plural: helmreleases + shortNames: + - hr + - hrs + singular: helmrelease + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: HelmRelease is a type to represent a Helm release. + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - chart + properties: + chart: + type: object + properties: + chartPullSecret: + description: ChartPullSecret holds the reference to the authentication + secret for accessing the Helm repository using HTTPS basic auth. + NOT IMPLEMENTED! + type: object + required: + - name + properties: + name: + type: string + git: + description: Git URL is the URL of the Git repository, e.g. `git@github.com:org/repo`, + `http://github.com/org/repo`, or `ssh://git@example.com:2222/org/repo.git`. + type: string + name: + description: Name is the name of the Helm chart _without_ an alias, + e.g. redis (for `helm upgrade [flags] stable/redis`). + type: string + path: + description: Path is the path to the chart relative to the repository + root. + type: string + ref: + description: Ref is the Git branch (or other reference) to use. 
+ Defaults to 'master', or the configured default Git ref. + type: string + repository: + description: RepoURL is the URL of the Helm repository, e.g. `https://kubernetes-charts.storage.googleapis.com` + or `https://charts.example.com`. + type: string + secretRef: + description: SecretRef holds the authentication secret for accessing + the Git repository (over HTTPS). The credentials will be added + to an HTTPS GitURL before the mirror is started. + type: object + required: + - name + properties: + name: + type: string + namespace: + type: string + skipDepUpdate: + description: SkipDepUpdate will tell the operator to skip running + 'helm dep update' before installing or upgrading the chart, the + chart dependencies _must_ be present for this to succeed. + type: boolean + version: + description: Version is the targeted Helm chart version, e.g. 7.0.1. + type: string + disableOpenAPIValidation: + description: DisableOpenAPIValidation controls whether OpenAPI validation + is enforced. + type: boolean + forceUpgrade: + description: Force will mark this Helm release to `--force` upgrades. + This forces the resource updates through delete/recreate if needed. + type: boolean + helmVersion: + description: 'HelmVersion is the version of Helm to target. If not supplied, + the lowest _enabled Helm version_ will be targeted. Valid HelmVersion + values are: "v2", "v3"' + type: string + enum: + - v2 + - v3 + maxHistory: + description: MaxHistory is the maximum amount of revisions to keep for + the Helm release. If not supplied, it defaults to 10. + type: integer + releaseName: + description: ReleaseName is the name of the The Helm release. If not + supplied, it will be generated by affixing the namespace to the resource + name. + type: string + resetValues: + description: ResetValues will mark this Helm release to reset the values + to the defaults of the targeted chart before performing an upgrade. 
+ Not explicitly setting this to `false` equals to `true` due to the + declarative nature of the operator. + type: boolean + rollback: + description: The rollback settings for this Helm release. + type: object + properties: + disableHooks: + description: DisableHooks will mark this Helm release to prevent + hooks from running during the rollback. + type: boolean + enable: + description: Enable will mark this Helm release for rollbacks. + type: boolean + force: + description: Force will mark this Helm release to `--force` rollbacks. + This forces the resource updates through delete/recreate if needed. + type: boolean + maxRetries: + description: MaxRetries is the maximum amount of upgrade retries + the operator should make before bailing. + type: integer + format: int64 + recreate: + description: Recreate will mark this Helm release to `--recreate-pods` + for if applicable. This performs pod restarts. + type: boolean + retry: + description: Retry will mark this Helm release for upgrade retries + after a rollback. + type: boolean + timeout: + description: Timeout is the time to wait for any individual Kubernetes + operation (like Jobs for hooks) during rollback. + type: integer + format: int64 + wait: + description: Wait will mark this Helm release to wait until all + Pods, PVCs, Services, and minimum number of Pods of a Deployment, + StatefulSet, or ReplicaSet are in a ready state before marking + the release as successful. + type: boolean + skipCRDs: + description: SkipCRDs will mark this Helm release to skip the creation + of CRDs during a Helm 3 installation. + type: boolean + targetNamespace: + description: TargetNamespace overrides the targeted namespace for the + Helm release. The default namespace equals to the namespace of the + HelmRelease resource. + type: string + test: + description: The test settings for this Helm release. 
+ type: object + properties: + cleanup: + description: Cleanup, when targeting Helm 2, determines whether + to delete test pods between each test run initiated by the Helm + Operator. + type: boolean + enable: + description: Enable will mark this Helm release for tests. + type: boolean + ignoreFailures: + description: IgnoreFailures will cause a Helm release to be rolled + back if it fails otherwise it will be left in a released state + type: boolean + timeout: + description: Timeout is the time to wait for any individual Kubernetes + operation (like Jobs for hooks) during test. + type: integer + format: int64 + timeout: + description: Timeout is the time to wait for any individual Kubernetes + operation (like Jobs for hooks) during installation and upgrade operations. + type: integer + format: int64 + valueFileSecrets: + description: ValueFileSecrets holds the local name references to secrets. + DEPRECATED, use ValuesFrom.secretKeyRef instead. + type: array + items: + type: object + required: + - name + properties: + name: + type: string + values: + description: Values holds the values for this Helm release. + type: object + valuesFrom: + type: array + items: + type: object + properties: + chartFileRef: + description: The reference to a local chart file with release + values. + type: object + required: + - path + properties: + optional: + description: Optional will mark this ChartFileSelector as + optional. The result of this are that operations are permitted + without the source, due to it e.g. being temporarily unavailable. + type: boolean + path: + description: Path is the file path to the source relative + to the chart root. + type: string + configMapKeyRef: + description: The reference to a config map with release values. 
+ type: object + required: + - name + properties: + key: + type: string + name: + type: string + namespace: + type: string + optional: + type: boolean + externalSourceRef: + description: The reference to an external source with release + values. + type: object + required: + - url + properties: + optional: + description: Optional will mark this ExternalSourceSelector + as optional. The result of this are that operations are + permitted without the source, due to it e.g. being temporarily + unavailable. + type: boolean + url: + description: URL is the URL of the external source. + type: string + secretKeyRef: + description: The reference to a secret with release values. + type: object + required: + - name + properties: + key: + type: string + name: + type: string + namespace: + type: string + optional: + type: boolean + wait: + description: Wait will mark this Helm release to wait until all Pods, + PVCs, Services, and minimum number of Pods of a Deployment, StatefulSet, + or ReplicaSet are in a ready state before marking the release as successful. + type: boolean + status: + description: HelmReleaseStatus contains status information about an HelmRelease. + type: object + properties: + conditions: + description: Conditions contains observations of the resource's state, + e.g., has the chart which it refers to been fetched. + type: array + items: + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last status update of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. 
+ type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, one of ('ChartFetched', 'Deployed', + 'Released', 'RolledBack', 'Tested'). + type: string + enum: + - ChartFetched + - Deployed + - Released + - RolledBack + - Tested + lastAttemptedRevision: + description: LastAttemptedRevision is the revision of the latest chart + sync, and may be of a failed release. + type: string + observedGeneration: + description: ObservedGeneration is the most recent generation observed + by the operator. + type: integer + format: int64 + phase: + description: Phase the release is in, one of ('ChartFetched', 'ChartFetchFailed', + 'Installing', 'Upgrading', 'Deployed', 'DeployFailed', 'Testing', + 'TestFailed', 'Tested', 'Succeeded', 'RollingBack', 'RolledBack', + 'RollbackFailed') + type: string + enum: + - ChartFetched + - ChartFetchFailed + - Installing + - Upgrading + - Deployed + - DeployFailed + - Testing + - TestFailed + - Tested + - Succeeded + - Failed + - RollingBack + - RolledBack + - RollbackFailed + releaseName: + description: ReleaseName is the name as either supplied or generated. + type: string + releaseStatus: + description: ReleaseStatus is the status as given by Helm for the release + managed by this resource. + type: string + revision: + description: Revision holds the Git hash or version of the chart currently + deployed. + type: string + rollbackCount: + description: RollbackCount records the amount of rollback attempts made, + it is incremented after a rollback failure and reset after a successful + upgrade or revision change. 
+ type: integer + format: int64 + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/ii/local-cluster/helm-operator/helm-operator.yaml b/ii/local-cluster/helm-operator/helm-operator.yaml new file mode 100644 index 0000000..809bf3a --- /dev/null +++ b/ii/local-cluster/helm-operator/helm-operator.yaml @@ -0,0 +1,185 @@ +--- +# Source: helm-operator/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helm-operator + labels: + app: helm-operator + chart: helm-operator-1.2.0 + release: helm-operator + heritage: Helm +--- +# Source: helm-operator/templates/git-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: helm-operator-git-deploy +type: Opaque +--- +# Source: helm-operator/templates/kube.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: helm-operator-kube-config +data: + config: | + apiVersion: v1 + clusters: [] + contexts: + - context: + cluster: "" + namespace: default + user: "" + name: default + current-context: default + kind: Config + preferences: {} + users: [] +--- +# Source: helm-operator/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: helm-operator + labels: + app: helm-operator + chart: helm-operator-1.2.0 + release: helm-operator + heritage: Helm +rules: + - apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' + - nonResourceURLs: + - '*' + verbs: + - '*' +--- +# Source: helm-operator/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: helm-operator + labels: + app: helm-operator + chart: helm-operator-1.2.0 + release: helm-operator + heritage: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: helm-operator +subjects: + - name: helm-operator + namespace: "helm-operator" + kind: ServiceAccount +--- +# Source: 
helm-operator/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: helm-operator + labels: + app: helm-operator + chart: helm-operator-1.2.0 + release: helm-operator + heritage: Helm +spec: + type: ClusterIP + ports: + - port: 3030 + targetPort: http + protocol: TCP + name: http + selector: + app: helm-operator + release: helm-operator +--- +# Source: helm-operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helm-operator + labels: + app: helm-operator + chart: helm-operator-1.2.0 + release: helm-operator + heritage: Helm +spec: + replicas: 1 + selector: + matchLabels: + app: helm-operator + release: helm-operator + strategy: + type: Recreate + template: + metadata: + annotations: + checksum/repositories: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + labels: + app: helm-operator + release: helm-operator + spec: + serviceAccountName: helm-operator + volumes: + - name: config + configMap: + name: helm-operator-kube-config + defaultMode: 0600 + - name: git-key + secret: + secretName: helm-operator-git-deploy + defaultMode: 0400 + containers: + - name: flux-helm-operator + image: docker.io/fluxcd/helm-operator:1.2.0 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 3030 + livenessProbe: + httpGet: + port: 3030 + path: /healthz + initialDelaySeconds: 1 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + port: 3030 + path: /healthz + initialDelaySeconds: 1 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + volumeMounts: + - name: config + mountPath: /root/.kube + readOnly: true + - name: git-key + mountPath: /etc/fluxd/ssh + readOnly: true + args: + - --enabled-helm-versions=v3 + - --kubeconfig=/root/.kube/config + - --log-format=fmt + - --git-timeout=20s + - --git-poll-interval=5m + - --charts-sync-interval=3m + - --status-update-interval=30s + - 
--update-chart-deps=true + - --log-release-diffs=false + - --workers=4 + - --tiller-namespace=kube-system + resources: + requests: + cpu: 50m + memory: 64Mi diff --git a/ii/local-cluster/ingress-boot-ii-nz.yaml b/ii/local-cluster/ingress-boot-ii-nz.yaml new file mode 100644 index 0000000..4f2dd7c --- /dev/null +++ b/ii/local-cluster/ingress-boot-ii-nz.yaml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: boot-ii-nz + namespace: sidero-system +spec: + rules: + - host: boot.ii.nz + http: + paths: + - backend: + service: + name: sidero-http + port: + number: 8081 + path: / + pathType: ImplementationSpecific diff --git a/ii/local-cluster/kind-config.yaml b/ii/local-cluster/kind-config.yaml new file mode 100644 index 0000000..899fb50 --- /dev/null +++ b/ii/local-cluster/kind-config.yaml @@ -0,0 +1,33 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + extraPortMappings: + - containerPort: 67 + hostPort: 67 + protocol: UDP + - containerPort: 69 + hostPort: 69 + protocol: UDP + - containerPort: 80 + hostPort: 80 + protocol: TCP + - containerPort: 443 + hostPort: 443 + protocol: TCP + - containerPort: 4011 + hostPort: 4011 + protocol: UDP + - containerPort: 7472 + hostPort: 7472 + protocol: UDP + - containerPort: 8081 + hostPort: 8081 + protocol: TCP + kubeadmConfigPatches: + - | + apiVersion: kubeadm.k8s.io/v1beta2 + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" diff --git a/ii/local-cluster/local-path-provisioner/local-path-provisioner.yaml b/ii/local-cluster/local-path-provisioner/local-path-provisioner.yaml new file mode 100644 index 0000000..3c7154b --- /dev/null +++ b/ii/local-cluster/local-path-provisioner/local-path-provisioner.yaml @@ -0,0 +1,158 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: local-path-storage + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-path-provisioner-service-account + namespace: 
local-path-storage + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-path-provisioner-role +rules: + - apiGroups: [ "" ] + resources: [ "nodes", "persistentvolumeclaims", "configmaps" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "endpoints", "persistentvolumes", "pods" ] + verbs: [ "*" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "patch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-path-provisioner-bind +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: local-path-provisioner-role +subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: local-path-storage + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-path-provisioner + namespace: local-path-storage +spec: + replicas: 1 + selector: + matchLabels: + app: local-path-provisioner + template: + metadata: + labels: + app: local-path-provisioner + spec: + serviceAccountName: local-path-provisioner-service-account + containers: + - name: local-path-provisioner + image: rancher/local-path-provisioner:v0.0.19 + imagePullPolicy: IfNotPresent + command: + - local-path-provisioner + - --debug + - start + - --config + - /etc/config/config.json + volumeMounts: + - name: config-volume + mountPath: /etc/config/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: config-volume + configMap: + name: local-path-config + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-path +provisioner: rancher.io/local-path +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Delete + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: local-path-config + namespace: local-path-storage +data: + config.json: |- + 
{ + "nodePathMap":[ + { + "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths":["/opt/local-path-provisioner"] + } + ] + } + setup: |- + #!/bin/sh + while getopts "m:s:p:" opt + do + case $opt in + p) + absolutePath=$OPTARG + ;; + s) + sizeInBytes=$OPTARG + ;; + m) + volMode=$OPTARG + ;; + esac + done + + mkdir -m 0777 -p ${absolutePath} + teardown: |- + #!/bin/sh + while getopts "m:s:p:" opt + do + case $opt in + p) + absolutePath=$OPTARG + ;; + s) + sizeInBytes=$OPTARG + ;; + m) + volMode=$OPTARG + ;; + esac + done + + rm -rf ${absolutePath} + helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: busybox + imagePullPolicy: IfNotPresent + + diff --git a/ii/local-cluster/metallb/config.yaml b/ii/local-cluster/metallb/config.yaml new file mode 100644 index 0000000..c02b74c --- /dev/null +++ b/ii/local-cluster/metallb/config.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: metallb-system + name: config +data: + config: | + address-pools: + - name: default + protocol: layer2 + addresses: + - 192.168.1.20-192.168.1.30 diff --git a/ii/local-cluster/metallb/metallb.yaml b/ii/local-cluster/metallb/metallb.yaml new file mode 100644 index 0000000..2cdc4bd --- /dev/null +++ b/ii/local-cluster/metallb/metallb.yaml @@ -0,0 +1,394 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +spec: + allowPrivilegeEscalation: false + allowedCapabilities: [] + allowedHostPaths: [] + defaultAddCapabilities: [] + defaultAllowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 
+ rule: MustRunAs + volumes: + - configMap + - secret + - emptyDir +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_ADMIN + - NET_RAW + - SYS_ADMIN + allowedHostPaths: [] + defaultAddCapabilities: [] + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + hostIPC: false + hostNetwork: true + hostPID: false + hostPorts: + - max: 7472 + min: 7472 + privileged: true + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - secret + - emptyDir +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: controller + namespace: metallb-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: metallb + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:controller +rules: +- apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch + - update +- apiGroups: + - '' + resources: + - services/status + verbs: + - update +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - controller + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: metallb + name: metallb-system:speaker +rules: +- apiGroups: + - '' + resources: + - services + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resourceNames: + - speaker + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role 
+metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +rules: +- apiGroups: + - '' + resources: + - pods + verbs: + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:controller +subjects: +- kind: ServiceAccount + name: controller + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: metallb + name: metallb-system:speaker +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metallb-system:speaker +subjects: +- kind: ServiceAccount + name: speaker + namespace: metallb-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: config-watcher + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: config-watcher +subjects: +- kind: ServiceAccount + name: controller +- kind: ServiceAccount + name: speaker +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: metallb + name: pod-lister + namespace: metallb-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-lister +subjects: +- kind: ServiceAccount + name: speaker +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: metallb + component: speaker + name: speaker + namespace: metallb-system +spec: + selector: + matchLabels: + app: metallb + component: speaker + template: + metadata: + annotations: + prometheus.io/port: '7472' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: 
speaker + spec: + containers: + - args: + - --port=7472 + - --config=config + env: + - name: METALLB_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: METALLB_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: METALLB_ML_BIND_ADDR + valueFrom: + fieldRef: + fieldPath: status.podIP + # needed when another software is also using memberlist / port 7946 + #- name: METALLB_ML_BIND_PORT + # value: "7946" + - name: METALLB_ML_LABELS + value: "app=metallb,component=speaker" + - name: METALLB_ML_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: METALLB_ML_SECRET_KEY + valueFrom: + secretKeyRef: + name: memberlist + key: secretkey + image: metallb/speaker:v0.9.6 + imagePullPolicy: Always + name: speaker + ports: + - containerPort: 7472 + name: monitoring + resources: + limits: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + - SYS_ADMIN + drop: + - ALL + readOnlyRootFilesystem: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: speaker + terminationGracePeriodSeconds: 2 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: metallb + component: controller + name: controller + namespace: metallb-system +spec: + revisionHistoryLimit: 3 + selector: + matchLabels: + app: metallb + component: controller + template: + metadata: + annotations: + prometheus.io/port: '7472' + prometheus.io/scrape: 'true' + labels: + app: metallb + component: controller + spec: + containers: + - args: + - --port=7472 + - --config=config + image: metallb/controller:v0.9.6 + imagePullPolicy: Always + name: controller + ports: + - containerPort: 7472 + name: monitoring + resources: + limits: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: 
true + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + runAsUser: 65534 + serviceAccountName: controller + terminationGracePeriodSeconds: 0 diff --git a/ii/local-cluster/metallb/namespace.yaml b/ii/local-cluster/metallb/namespace.yaml new file mode 100644 index 0000000..003269b --- /dev/null +++ b/ii/local-cluster/metallb/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: metallb-system + labels: + app: metallb diff --git a/ii/local-cluster/nginx-ingress/nginx-ingress.yaml b/ii/local-cluster/nginx-ingress/nginx-ingress.yaml new file mode 100644 index 0000000..413abc5 --- /dev/null +++ b/ii/local-cluster/nginx-ingress/nginx-ingress.yaml @@ -0,0 +1,29 @@ +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: nginx-ingress + namespace: nginx-ingress +spec: + releaseName: nginx-ingress + chart: + repository: https://kubernetes.github.io/ingress-nginx + name: ingress-nginx + version: 3.30.0 + values: + controller: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - ingress-nginx + topologyKey: "kubernetes.io/hostname" + service: + type: LoadBalancer + externalTrafficPolicy: Local + defaultBackend: + enabled: false diff --git a/ii/local-cluster/nohup.out b/ii/local-cluster/nohup.out new file mode 100644 index 0000000..08e78ac --- /dev/null +++ b/ii/local-cluster/nohup.out @@ -0,0 +1,2 @@ +: +: diff --git a/ii/local-cluster/prepare-rpis-sidero.org b/ii/local-cluster/prepare-rpis-sidero.org new file mode 100644 index 0000000..7c64fe0 --- /dev/null +++ b/ii/local-cluster/prepare-rpis-sidero.org @@ -0,0 +1,46 @@ +#+TITLE: Prepare RPis Sidero +
+The purpose of this doc is to run through the process of preparing the Sidero deployment and Raspberry Pi 4Bs for management. 
It closely mirrors the documentation of [[https://www.sidero.dev/docs/v0.3/guides/rpi4-as-servers/][Sidero]] but is more specific for ii's local-cluster and is org-ified. + +* Download EEPROM for Network booting +Save this in the Downloads folder. +#+begin_src tmate :window prepare +( + cd ~/Downloads + curl -O -L https://github.com/raspberrypi/rpi-eeprom/releases/download/v2021.04.29-138a1/rpi-boot-eeprom-recovery-2021-04-29-vl805-000138a1-network.zip + ls ~/Downloads/rpi-boot-eeprom-recovery-*-network.zip +) +#+end_src + +* Format the SD card +#+begin_src tmate :window prepare +DEVICE=/dev/sdb1 +if mount | grep -q "${DEVICE}"; then + echo "'${DEVICE}' is still mounted." > /dev/stderr +fi +sudo mkfs.vfat -F32 /dev/sdb1 +#+end_src + +Next, mount the SD card. + +* Extract the network recovery +Insert an SD card and prepare the drive + +#+begin_src tmate :window prepare +DRIVE=/var/run/media/${USER}/* +echo ${DRIVE} + +unzip -d ${DRIVE} ~/Downloads/rpi-boot-eeprom-recovery-*-network.zip +#+end_src +(presumes one device connected) + +Next, unmount the SD card. + +* Flash the EEPROM +With the display connected, insert the SD card into the slot in the RPi and boot it. +The flash will be complete when the screen is green. + +* Boot into network recovery +Removing the SD card, boot the RPi into the network recovery. 
+ +* diff --git a/ii/local-cluster/servers/136c6fe1/RPI_EFI.fd b/ii/local-cluster/servers/136c6fe1/RPI_EFI.fd new file mode 100644 index 0000000..bcf4ee6 Binary files /dev/null and b/ii/local-cluster/servers/136c6fe1/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/1f8570e2/RPI_EFI.fd b/ii/local-cluster/servers/1f8570e2/RPI_EFI.fd new file mode 100644 index 0000000..653691c Binary files /dev/null and b/ii/local-cluster/servers/1f8570e2/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/2bbd241a/RPI_EFI.fd b/ii/local-cluster/servers/2bbd241a/RPI_EFI.fd new file mode 100644 index 0000000..f2d19d0 Binary files /dev/null and b/ii/local-cluster/servers/2bbd241a/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/2cb186c5/RPI_EFI.fd b/ii/local-cluster/servers/2cb186c5/RPI_EFI.fd new file mode 100644 index 0000000..5134714 Binary files /dev/null and b/ii/local-cluster/servers/2cb186c5/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/407d7434/RPI_EFI.fd b/ii/local-cluster/servers/407d7434/RPI_EFI.fd new file mode 100644 index 0000000..9b10d4f Binary files /dev/null and b/ii/local-cluster/servers/407d7434/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/4b1fcf44/RPI_EFI.fd b/ii/local-cluster/servers/4b1fcf44/RPI_EFI.fd new file mode 100644 index 0000000..0b8243e Binary files /dev/null and b/ii/local-cluster/servers/4b1fcf44/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/bc3ebf28/RPI_EFI.fd b/ii/local-cluster/servers/bc3ebf28/RPI_EFI.fd new file mode 100644 index 0000000..6c2816d Binary files /dev/null and b/ii/local-cluster/servers/bc3ebf28/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/bc3ef28/RPI_EFI.fd b/ii/local-cluster/servers/bc3ef28/RPI_EFI.fd new file mode 100644 index 0000000..ea6ea05 Binary files /dev/null and b/ii/local-cluster/servers/bc3ef28/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/bf267951/RPI_EFI.fd b/ii/local-cluster/servers/bf267951/RPI_EFI.fd new file mode 100644 index 0000000..03e4207 Binary 
files /dev/null and b/ii/local-cluster/servers/bf267951/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/c3052218/RPI_EFI.fd b/ii/local-cluster/servers/c3052218/RPI_EFI.fd new file mode 100644 index 0000000..640dc7e Binary files /dev/null and b/ii/local-cluster/servers/c3052218/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/d997b14e/RPI_EFI.fd b/ii/local-cluster/servers/d997b14e/RPI_EFI.fd new file mode 100644 index 0000000..6032f93 Binary files /dev/null and b/ii/local-cluster/servers/d997b14e/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/dd24784d/RPI_EFI.fd b/ii/local-cluster/servers/dd24784d/RPI_EFI.fd new file mode 100644 index 0000000..ab2ab8d Binary files /dev/null and b/ii/local-cluster/servers/dd24784d/RPI_EFI.fd differ diff --git a/ii/local-cluster/servers/ebc28a3f/RPI_EFI.fd b/ii/local-cluster/servers/ebc28a3f/RPI_EFI.fd new file mode 100644 index 0000000..9bd3143 Binary files /dev/null and b/ii/local-cluster/servers/ebc28a3f/RPI_EFI.fd differ diff --git a/ii/local-cluster/sidero-cm-debug.yaml b/ii/local-cluster/sidero-cm-debug.yaml new file mode 100644 index 0000000..6454668 --- /dev/null +++ b/ii/local-cluster/sidero-cm-debug.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sidero-debug + namespace: sidero-system +spec: + hostNetwork: true + containers: + - image: alpine:3.12 + name: sidero-debug + securityContext: + privileged: true + command: + - sleep + - infinity + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - sidero + topologyKey: "kubernetes.io/hostname" + dnsPolicy: ClusterFirst + restartPolicy: Always diff --git a/ii/local-cluster/sidero-controller-manager-debug.yaml b/ii/local-cluster/sidero-controller-manager-debug.yaml new file mode 100644 index 0000000..8b2d0bb --- /dev/null +++ b/ii/local-cluster/sidero-controller-manager-debug.yaml @@ -0,0 +1,23 @@ 
+apiVersion: v1 +kind: Pod +metadata: + name: sidero-debug + namespace: sidero-system +spec: + hostNetwork: true + containers: + - image: alpine:3.12 + name: sidero-debug + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/sidero/tftp + name: tftp-folder + command: + - sh + - -c + - apk add tar && sleep infinity + volumes: + - name: tftp-folder + persistentVolumeClaim: + claimName: sidero-tftp diff --git a/ii/local-cluster/sidero-controller-manager-patch.yaml b/ii/local-cluster/sidero-controller-manager-patch.yaml new file mode 100644 index 0000000..069e963 --- /dev/null +++ b/ii/local-cluster/sidero-controller-manager-patch.yaml @@ -0,0 +1,12 @@ +spec: + template: + spec: + volumes: + - name: tftp-folder + persistentVolumeClaim: + claimName: sidero-tftp + containers: + - name: manager + volumeMounts: + - mountPath: /var/lib/sidero/tftp + name: tftp-folder diff --git a/ii/local-cluster/sidero-controller-manager-tftp-pvc.yaml b/ii/local-cluster/sidero-controller-manager-tftp-pvc.yaml new file mode 100644 index 0000000..b7251c8 --- /dev/null +++ b/ii/local-cluster/sidero-controller-manager-tftp-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: sidero-tftp + namespace: sidero-system +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/ii/local-cluster/sidero/local-cluster-rpi-template.yaml b/ii/local-cluster/sidero/local-cluster-rpi-template.yaml new file mode 100644 index 0000000..66b8d40 --- /dev/null +++ b/ii/local-cluster/sidero/local-cluster-rpi-template.yaml @@ -0,0 +1,123 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.244.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: MetalCluster + name: ${CLUSTER_NAME} + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 
+ kind: TalosControlPlane + name: ${CLUSTER_NAME}-cp +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalCluster +metadata: + name: ${CLUSTER_NAME} +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT} + port: ${CONTROL_PLANE_PORT} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalMachineTemplate +metadata: + name: ${CLUSTER_NAME}-cp +spec: + template: + spec: + serverClassRef: + apiVersion: metal.sidero.dev/v1alpha1 + kind: ServerClass + name: ${CONTROL_PLANE_SERVERCLASS} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +kind: TalosControlPlane +metadata: + name: ${CLUSTER_NAME}-cp +spec: + version: ${KUBERNETES_VERSION} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + kind: MetalMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + name: ${CLUSTER_NAME}-cp + controlPlaneConfig: + init: + generateType: init + talosVersion: ${TALOS_VERSION} + configPatches: + - op: add + path: /machine/network + value: + interfaces: + - interface: eth0 + dhcp: true + vip: + ip: ${CONTROL_PLANE_ENDPOINT} + controlplane: + generateType: controlplane + talosVersion: ${TALOS_VERSION} + configPatches: + - op: add + path: /machine/network + value: + interfaces: + - interface: eth0 + dhcp: true + vip: + ip: ${CONTROL_PLANE_ENDPOINT} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: ${CLUSTER_NAME}-workers +spec: + template: + spec: + generateType: join + talosVersion: ${TALOS_VERSION} +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-workers +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: null + template: + spec: + version: ${KUBERNETES_VERSION} + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + name: ${CLUSTER_NAME}-workers + clusterName: ${CLUSTER_NAME} + infrastructureRef: + 
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: MetalMachineTemplate + name: ${CLUSTER_NAME}-workers +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalMachineTemplate +metadata: + name: ${CLUSTER_NAME}-workers +spec: + template: + spec: + serverClassRef: + apiVersion: metal.sidero.dev/v1alpha1 + kind: ServerClass + name: ${WORKER_SERVERCLASS} diff --git a/ii/local-cluster/sidero/local-clusters/local-cluster-mgmt.yaml b/ii/local-cluster/sidero/local-clusters/local-cluster-mgmt.yaml new file mode 100644 index 0000000..88aaabb --- /dev/null +++ b/ii/local-cluster/sidero/local-clusters/local-cluster-mgmt.yaml @@ -0,0 +1,130 @@ +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: local-cluster-mgmt + namespace: local-clusters +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.244.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + kind: TalosControlPlane + name: local-cluster-mgmt-cp + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: MetalCluster + name: local-cluster-mgmt +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalCluster +metadata: + name: local-cluster-mgmt + namespace: local-clusters +spec: + controlPlaneEndpoint: + host: 192.168.1.31 + port: 6443 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalMachineTemplate +metadata: + name: local-cluster-mgmt-cp + namespace: local-clusters +spec: + template: + spec: + serverClassRef: + apiVersion: metal.sidero.dev/v1alpha1 + kind: ServerClass + name: raspberrypi4-servers +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +kind: TalosControlPlane +metadata: + name: local-cluster-mgmt-cp + namespace: local-clusters +spec: + controlPlaneConfig: + controlplane: + configPatches: + - op: add + path: /machine/network + value: + interfaces: + - dhcp: true + interface: eth0 + vip: + ip: 192.168.1.31 + generateType: 
controlplane + talosVersion: v1.11.0 + init: + configPatches: + - op: add + path: /machine/network + value: + interfaces: + - dhcp: true + interface: eth0 + vip: + ip: 192.168.1.31 + generateType: init + talosVersion: v1.11.0 + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: MetalMachineTemplate + name: local-cluster-mgmt-cp + replicas: 1 + version: v1.21.2 +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: TalosConfigTemplate +metadata: + name: local-cluster-mgmt-workers + namespace: local-clusters +spec: + template: + spec: + generateType: join + talosVersion: v1.11.0 +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: local-cluster-mgmt-workers + namespace: local-clusters +spec: + clusterName: local-cluster-mgmt + replicas: 5 + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: TalosConfigTemplate + name: local-cluster-mgmt-workers + clusterName: local-cluster-mgmt + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: MetalMachineTemplate + name: local-cluster-mgmt-workers + version: v1.21.2 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: MetalMachineTemplate +metadata: + name: local-cluster-mgmt-workers + namespace: local-clusters +spec: + template: + spec: + serverClassRef: + apiVersion: metal.sidero.dev/v1alpha1 + kind: ServerClass + name: raspberrypi4-servers diff --git a/ii/local-cluster/sidero/rpi-environment.yaml b/ii/local-cluster/sidero/rpi-environment.yaml new file mode 100644 index 0000000..3d46959 --- /dev/null +++ b/ii/local-cluster/sidero/rpi-environment.yaml @@ -0,0 +1,26 @@ +apiVersion: metal.sidero.dev/v1alpha1 +kind: Environment +metadata: + name: raspberrypi4-servers +spec: + initrd: + url: https://github.com/talos-systems/talos/releases/download/v0.11.0/initramfs-arm64.xz + kernel: + args: + - console=tty0 + - console=ttyS0 + - 
consoleblank=0 + - earlyprintk=ttyS0 + - ima_appraise=fix + - ima_hash=sha512 + - ima_template=ima-ng + - init_on_alloc=1 + - initrd=initramfs.xz + - nvme_core.io_timeout=4294967295 + - printk.devkmsg=on + - pti=on + - random.trust_cpu=on + - slab_nomerge= + - talos.config=http://192.168.1.21:8081/configdata?uuid=${uuid} + - talos.platform=metal + url: https://github.com/talos-systems/talos/releases/download/v0.11.0/vmlinuz-arm64 diff --git a/ii/local-cluster/sidero/rpi-serverclass.yaml b/ii/local-cluster/sidero/rpi-serverclass.yaml new file mode 100644 index 0000000..83d6437 --- /dev/null +++ b/ii/local-cluster/sidero/rpi-serverclass.yaml @@ -0,0 +1,26 @@ +apiVersion: metal.sidero.dev/v1alpha1 +kind: ServerClass +metadata: + name: raspberrypi4-servers +spec: + environmentRef: + name: raspberrypi4-servers + configPatches: + # - op: add + # path: /cluster/allowSchedulingOnMasters + # value: true + - op: replace + path: /machine/install + value: + disk: /dev/mmcblk1 + image: ghcr.io/talos-systems/installer:v0.11.0 + bootloader: true + wipe: false + force: false + qualifiers: + cpu: + - manufacturer: Broadcom + version: "BCM2711 (ARM Cortex-A72)" + systemInformation: + - manufacturer: "Raspberry Pi Foundation" + productName: "Raspberry Pi 4 Model B" diff --git a/ii/local-cluster/talos-config-patches.patch b/ii/local-cluster/talos-config-patches.patch new file mode 100644 index 0000000..a4a53a7 --- /dev/null +++ b/ii/local-cluster/talos-config-patches.patch @@ -0,0 +1,52 @@ +diff --git a/talos/controlplane.yaml b/talos/controlplane.yaml +index bc87738..cf17a8a 100644 +--- a/talos/controlplane.yaml ++++ b/talos/controlplane.yaml +@@ -35,7 +35,12 @@ machine: + # - rw + + # Provides machine specific network configuration options. +- network: {} ++ network: ++ interfaces: ++ - interface: eth0 ++ dhcp: true ++ vip: ++ ip: 192.168.1.100 + # # `interfaces` is used to define the network interface configuration. + # interfaces: + # - interface: eth0 # The interface name. 
+@@ -214,6 +219,7 @@ machine: + # slot: 0 # Key slot number for luks2 encryption. + # Provides cluster specific configuration options. + cluster: ++ allowSchedulingOnMasters: true + # Provides control plane specific configuration options. + controlPlane: + endpoint: https://192.168.1.100:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname. +diff --git a/talos/init.yaml b/talos/init.yaml +index ad6d34e..46bdafd 100644 +--- a/talos/init.yaml ++++ b/talos/init.yaml +@@ -35,7 +35,12 @@ machine: + # - rw + + # Provides machine specific network configuration options. +- network: {} ++ network: ++ interfaces: ++ - interface: eth0 ++ dhcp: true ++ vip: ++ ip: 192.168.1.100 + # # `interfaces` is used to define the network interface configuration. + # interfaces: + # - interface: eth0 # The interface name. +@@ -214,6 +219,7 @@ machine: + # slot: 0 # Key slot number for luks2 encryption. + # Provides cluster specific configuration options. + cluster: ++ allowSchedulingOnMasters: true + # Provides control plane specific configuration options. + controlPlane: + endpoint: https://192.168.1.100:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname. 
diff --git a/ii/local-cluster/tftp-root/SERIAL/bcm2711-rpi-4-b.dtb b/ii/local-cluster/tftp-root/SERIAL/bcm2711-rpi-4-b.dtb new file mode 120000 index 0000000..b271be6 --- /dev/null +++ b/ii/local-cluster/tftp-root/SERIAL/bcm2711-rpi-4-b.dtb @@ -0,0 +1 @@ +../bcm2711-rpi-4-b.dtb \ No newline at end of file diff --git a/ii/local-cluster/tftp-root/SERIAL/config.txt b/ii/local-cluster/tftp-root/SERIAL/config.txt new file mode 120000 index 0000000..f1a6cc6 --- /dev/null +++ b/ii/local-cluster/tftp-root/SERIAL/config.txt @@ -0,0 +1 @@ +../config.txt \ No newline at end of file diff --git a/ii/local-cluster/tftp-root/SERIAL/firmware b/ii/local-cluster/tftp-root/SERIAL/firmware new file mode 120000 index 0000000..531db7a --- /dev/null +++ b/ii/local-cluster/tftp-root/SERIAL/firmware @@ -0,0 +1 @@ +../firmware \ No newline at end of file diff --git a/ii/local-cluster/tftp-root/SERIAL/fixup4.dat b/ii/local-cluster/tftp-root/SERIAL/fixup4.dat new file mode 120000 index 0000000..6f1d3d7 --- /dev/null +++ b/ii/local-cluster/tftp-root/SERIAL/fixup4.dat @@ -0,0 +1 @@ +../fixup4.dat \ No newline at end of file diff --git a/ii/local-cluster/tftp-root/SERIAL/ipxe-arm64.efi b/ii/local-cluster/tftp-root/SERIAL/ipxe-arm64.efi new file mode 120000 index 0000000..857baa1 --- /dev/null +++ b/ii/local-cluster/tftp-root/SERIAL/ipxe-arm64.efi @@ -0,0 +1 @@ +../ipxe-arm64.efi \ No newline at end of file diff --git a/ii/local-cluster/tftp-root/SERIAL/ipxe.efi b/ii/local-cluster/tftp-root/SERIAL/ipxe.efi new file mode 120000 index 0000000..6f9a03d --- /dev/null +++ b/ii/local-cluster/tftp-root/SERIAL/ipxe.efi @@ -0,0 +1 @@ +../ipxe.efi \ No newline at end of file diff --git a/ii/local-cluster/tftp-root/SERIAL/overlays b/ii/local-cluster/tftp-root/SERIAL/overlays new file mode 120000 index 0000000..0d44a21 --- /dev/null +++ b/ii/local-cluster/tftp-root/SERIAL/overlays @@ -0,0 +1 @@ +../overlays \ No newline at end of file diff --git a/ii/local-cluster/tftp-root/SERIAL/start4.elf 
b/ii/local-cluster/tftp-root/SERIAL/start4.elf new file mode 120000 index 0000000..a4b9efe --- /dev/null +++ b/ii/local-cluster/tftp-root/SERIAL/start4.elf @@ -0,0 +1 @@ +../start4.elf \ No newline at end of file diff --git a/ii/local-cluster/wireguard.yaml b/ii/local-cluster/wireguard.yaml new file mode 100644 index 0000000..7c244e7 --- /dev/null +++ b/ii/local-cluster/wireguard.yaml @@ -0,0 +1,113 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: wireguard +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + namespace: wireguard + name: wireguard-pvc +spec: + storageClassName: local-path + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Mi +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: wireguard-config + namespace: wireguard +data: + PUID: "1000" + PGID: "1000" + TZ: "Pacific/Auckland" + SERVERPORT: "51820" + SERVERURL: "wires-and-guards.ii.nz" + PEERS: "5" + # PEERDNS: "10.43.0.30" + PEERDNS: "auto" + ALLOWEDIPS: "0.0.0.0/0, ::/0" + INTERNAL_SUBNET: "10.13.13.0" +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: wireguard + namespace: wireguard + labels: + app: wireguard +spec: + serviceName: "wireguard" + replicas: 1 + selector: + matchLabels: + app: wireguard + template: + metadata: + labels: + app: wireguard + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - ii-thinkpad-p70 + containers: + - name: wireguard + image: docker.io/linuxserver/wireguard:v1.0.20210424-ls36 + envFrom: + - configMapRef: + name: wireguard-config + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + volumeMounts: + - name: wg-config + mountPath: /config + - name: host-volumes + mountPath: /lib/modules + ports: + - containerPort: 51820 + protocol: UDP + resources: + requests: + memory: "64Mi" + cpu: "10m" + limits: + memory: "128Mi" + cpu: "100m" + 
volumes: + - name: wg-config + persistentVolumeClaim: + claimName: wireguard-pvc + - name: host-volumes + hostPath: + path: /lib/modules + type: Directory +--- +apiVersion: v1 +kind: Service +metadata: + name: wireguard + namespace: wireguard +spec: + selector: + app: wireguard + ports: + - name: wireguard + port: 51820 + targetPort: 51820 + protocol: UDP + externalTrafficPolicy: Local + type: LoadBalancer diff --git a/ii/machine-setup.org b/ii/machine-setup.org index 3fef40c..4552d04 100644 --- a/ii/machine-setup.org +++ b/ii/machine-setup.org @@ -247,5 +247,4 @@ tmate -S /tmp/ii-tmate.sock new-session -A -s k8s -c ~/go/src/kubernetes # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# org-confirm-babel-evaluate: nil # End: diff --git a/ii/mattermost-ii-coop/README.org b/ii/mattermost-ii-coop/README.org new file mode 100644 index 0000000..075da77 --- /dev/null +++ b/ii/mattermost-ii-coop/README.org @@ -0,0 +1,261 @@ +#+TITLE: Mattermost + +* Mattermost + +#+begin_quote +A ChatOps service +#+end_quote + +#+begin_src tmate :dir . :window mattermost +kubectl create ns mattermost-ii-coop +#+end_src + +#+begin_src tmate :dir . :window mattermost + export KUBECONFIG=~/.kube/config-gitlab-ii-coop +#+end_src + +#+NAME: Get LoadBalancer IP +#+begin_src tmate :dir . 
:window mattermost + export LOAD_BALANCER_IP=$(kubectl -n kube-system get cm kubeadm-config -o=jsonpath='{.data.ClusterConfiguration}' | yq '.controlPlaneEndpoint' -cr | cut -d ':' -f1) +#+end_src + +#+NAME: Assign DNS address +#+begin_src yaml :tangle ./dnsendpoint-mattermost-ii-coop.yaml +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} +spec: + endpoints: + - dnsName: ns1.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} + recordTTL: 60 + recordType: A + targets: + - ${LOAD_BALANCER_IP} + - dnsName: mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} + recordTTL: 60 + recordType: NS + targets: + - ns1.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} +#+end_src + +#+begin_src tmate :dir . :window mattermost + envsubst < dnsendpoint-mattermost-ii-coop.yaml | KUBECONFIG= kubectl -n mattermost-ii-coop apply -f - +#+end_src + +#+NAME: DNSEndpoint +#+begin_src yaml :tangle ./dnsendpoint.yaml +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: mattermost-ii-coop +spec: + endpoints: + - dnsName: 'mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME}' + recordTTL: 3600 + recordType: A + targets: + - ${LOAD_BALANCER_IP} + - dnsName: '*.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME}' + recordTTL: 3600 + recordType: A + targets: + - ${LOAD_BALANCER_IP} + - dnsName: mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} + recordTTL: 3600 + recordType: SOA + targets: + - 'ns1.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME}. hostmaster.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME}. 5 3600 3600 3600 3600' +#+end_src + +#+begin_src tmate :dir . :window mattermost + envsubst < dnsendpoint.yaml | kubectl -n powerdns apply -f - +#+end_src + +#+name: create-namespace +#+begin_src tmate :dir . 
:window mattermost +kubectl get ns mattermost || kubectl create ns mattermost +#+end_src + +** Install Postgres-Operator +#+NAME: Postgres operator +#+begin_src yaml :tangle ./postgres-operator.yaml + apiVersion: helm.fluxcd.io/v1 + kind: HelmRelease + metadata: + name: postgres-operator + spec: + releaseName: postgres-operator + chart: + git: https://github.com/zalando/postgres-operator.git + ref: master + path: charts/postgres-operator +#+end_src + +#+begin_src tmate :dir . :window mattermost +kubectl create ns postgres-operator +kubectl -n postgres-operator apply -f ./postgres-operator.yaml +#+end_src + +** Install Postgresql-HA +#+name: postgres-database +#+begin_src yaml :tangle ./postgresql.yaml +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: mattermost-db +spec: + enableConnectionPooler: true + connectionPooler: + mode: session + resources: + requests: + cpu: 250m + memory: 100Mi + limits: + cpu: "1" + memory: 100Mi + teamId: "mattermost" + volume: + size: 3Gi + numberOfInstances: 3 + users: + mattermost: # database owner + - superuser + - createdb + databases: + mattermost: mattermost # dbname: owner + postgresql: + version: "13" +#+end_src + +#+name: install-postgres-database +#+begin_src tmate :dir . :window mattermost +kubectl -n mattermost apply -f ./postgresql.yaml +#+end_src + +** Install MinIO Operator +Create the namespace: +#+name: create-minio-namespace +#+begin_src tmate :dir . :window mattermost +kubectl create ns minio-operator +#+end_src + +Download the latest manifests: +#+name: download-minio-operator-manifests +#+begin_src tmate :dir . :window mattermost +curl -O -L https://raw.githubusercontent.com/mattermost/mattermost-operator/v1.11.1/docs/minio-operator/minio-operator.yaml +#+end_src + +Install the operator: +#+name: install-minio-operator +#+begin_src tmate :dir . 
:window mattermost +kubectl -n minio-operator apply -f ./minio-operator.yaml +#+end_src + +** Install Mattermost Operator +Create the namespace: +#+name: create-mattermost-operator-namespace +#+begin_src tmate :dir . :window mattermost +kubectl create ns mattermost-operator +#+end_src + +Download the latest manifests: +#+name: download-mattermost-operator-manifests +#+begin_src tmate :dir . :window mattermost +curl -O -L https://raw.githubusercontent.com/mattermost/mattermost-operator/v1.14.0/docs/mattermost-operator/mattermost-operator.yaml +#+end_src + +Install the operator: +#+name: install-mattermost-operator +#+begin_src tmate :dir . :window mattermost +kubectl apply -n mattermost-operator -f ./mattermost-operator.yaml +#+end_src + +** Install Mattermost +*** Operator configuration +#+name: mattermost-cluster-definition +#+begin_src yaml :tangle ./mattermost-clusterinstallation.yaml + apiVersion: installation.mattermost.com/v1beta1 + kind: Mattermost + metadata: + name: mattermost # Name of your cluster as shown in Kubernetes. + spec: + image: mattermost/mattermost-enterprise-edition # Docker image for the app servers. + size: 1000users # Size of the Mattermost installation, typically based on the number of users. This is a write-only field - its value is erased after setting appropriate values of resources. Automatically sets the replica and resource limits for Minio, databases and app servers based on the number provided here. Accepts 100users, 1000users, 5000users, 10000users, or 25000users. Manually setting replicas or resources will override the values set by 'size'. + useServiceLoadBalancer: false # Set to true to use AWS or Azure load balancers instead of an NGINX controller. + serviceAnnotations: {} # Service annotations to use with AWS or Azure load balancers. + ingressAnnotations: + kubernetes.io/ingress.class: nginx + ingressName: mattermost.${SHARINGIO_PAIR_BASE_DNS_NAME} # Set to your hostname, e.g. example.mattermost-example.com. 
Required when using an Ingress controller. Ignored if useServiceLoadBalancer is true. + useIngressTLS: true + database: + external: + secret: "mattermost-database" # Set to the name of a Kubernetes secret that contains the password to your external MySQL database. MySQL username must be "root". + fileStore: + operatorManaged: + storageSize: 10Gi # Set the file storage size to be used by Minio. + replicas: 4 + elasticSearch: + host: "" # Hostname Elasticsearch can be accessed at. + username: "" # Username to log into Elasticsearch. + password: "" # Password to log into Elasticsearch. + scheduling: + nodeSelector: {} # See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector. + affinity: {} # See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity. +#+end_src + +#+name: wait-for-mattermost-db-creds +#+begin_src tmate :dir . :window mattermost +until kubectl -n mattermost get secret/mattermost.mattermost-db.credentials.postgresql.acid.zalan.do; do + sleep 5s +done +#+end_src + +#+name: create-mattermost-database-connection-string +#+begin_src tmate :dir . :window mattermost +POSTGRES_PASSWORD="$(kubectl -n mattermost get secret mattermost.mattermost-db.credentials.postgresql.acid.zalan.do -o=jsonpath='{.data.password}' | base64 -d)" +kubectl -n mattermost create secret generic mattermost-database --from-literal=DB_CONNECTION_STRING="postgres://mattermost:$POSTGRES_PASSWORD@mattermost-db-pooler:5432/mattermost?sslmode=require" +#+end_src + +#+name: install-mattermost-cluster +#+begin_src tmate :dir . 
:window mattermost +envsubst < mattermost-clusterinstallation.yaml | kubectl -n mattermost apply -f - +#+end_src + +** TODO Certs +#+begin_src yaml :tangle ./certs.yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod-mattermost +spec: + secretName: $CERT_SECRET_NAME + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + group: cert-manager.io + commonName: "mattermost.${SHARINGIO_PAIR_BASE_DNS_NAME}" + dnsNames: + - "mattermost.${SHARINGIO_PAIR_BASE_DNS_NAME}" +#+end_src + +#+begin_src tmate :dir . :window mattermost +export CERT_SECRET_NAME="mattermost-${SHARINGIO_PAIR_BASE_DNS_NAME//./-}-tls-cert" +envsubst < certs.yaml #| kubectl -n mattermost apply -f - +#+end_src + +** TODO Migration +https://github.com/mattermost/mattermost-operator/tree/v1.14.0#restore-an-existing-mattermost-mysql-database + +** Final configurations + +*** File Storage + +Navigate to System Console > Environment > File Storage. + +Set File System Storage to Amazon S3. +Set Enable Secure Amazon S3 Connections to _false_. 
+ +* Notes and references +- https://github.com/mattermost/mattermost-operator/blob/v1.14.0/docs/examples/mattermost_full.yaml diff --git a/ii/mattermost-ii-coop/certs.yaml b/ii/mattermost-ii-coop/certs.yaml new file mode 100644 index 0000000..ed1bd0d --- /dev/null +++ b/ii/mattermost-ii-coop/certs.yaml @@ -0,0 +1,15 @@ +# TODO Certs + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod-mattermost +spec: + secretName: $CERT_SECRET_NAME + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + group: cert-manager.io + commonName: "mattermost.${SHARINGIO_PAIR_BASE_DNS_NAME}" + dnsNames: + - "mattermost.${SHARINGIO_PAIR_BASE_DNS_NAME}" diff --git a/ii/mattermost-ii-coop/dnsendpoint-mattermost-ii-coop.yaml b/ii/mattermost-ii-coop/dnsendpoint-mattermost-ii-coop.yaml new file mode 100644 index 0000000..0e9dc99 --- /dev/null +++ b/ii/mattermost-ii-coop/dnsendpoint-mattermost-ii-coop.yaml @@ -0,0 +1,20 @@ + + +# #+NAME: Assign DNS address + +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} +spec: + endpoints: + - dnsName: ns1.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} + recordTTL: 60 + recordType: A + targets: + - ${LOAD_BALANCER_IP} + - dnsName: mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} + recordTTL: 60 + recordType: NS + targets: + - ns1.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} diff --git a/ii/mattermost-ii-coop/dnsendpoint.yaml b/ii/mattermost-ii-coop/dnsendpoint.yaml new file mode 100644 index 0000000..121a5fd --- /dev/null +++ b/ii/mattermost-ii-coop/dnsendpoint.yaml @@ -0,0 +1,25 @@ + + +# #+NAME: DNSEndpoint + +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: mattermost-ii-coop +spec: + endpoints: + - dnsName: 'mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME}' + recordTTL: 3600 + recordType: A + targets: + - ${LOAD_BALANCER_IP} + - dnsName: '*.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME}' + recordTTL: 3600 + 
recordType: A + targets: + - ${LOAD_BALANCER_IP} + - dnsName: mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME} + recordTTL: 3600 + recordType: SOA + targets: + - 'ns1.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME}. hostmaster.mattermosttest.${SHARINGIO_PAIR_BASE_DNS_NAME}. 5 3600 3600 3600 3600' diff --git a/ii/mattermost-ii-coop/mattermost-clusterinstallation.yaml b/ii/mattermost-ii-coop/mattermost-clusterinstallation.yaml new file mode 100644 index 0000000..5e0804b --- /dev/null +++ b/ii/mattermost-ii-coop/mattermost-clusterinstallation.yaml @@ -0,0 +1,30 @@ +# Operator configuration +# #+name: mattermost-cluster-definition + + apiVersion: installation.mattermost.com/v1beta1 + kind: Mattermost + metadata: + name: mattermost # Name of your cluster as shown in Kubernetes. + spec: + image: mattermost/mattermost-enterprise-edition # Docker image for the app servers. + size: 1000users # Size of the Mattermost installation, typically based on the number of users. This is a write-only field - its value is erased after setting appropriate values of resources. Automatically sets the replica and resource limits for Minio, databases and app servers based on the number provided here. Accepts 100users, 1000users, 5000users, 10000users, or 25000users. Manually setting replicas or resources will override the values set by 'size'. + useServiceLoadBalancer: false # Set to true to use AWS or Azure load balancers instead of an NGINX controller. + serviceAnnotations: {} # Service annotations to use with AWS or Azure load balancers. + ingressAnnotations: + kubernetes.io/ingress.class: nginx + ingressName: mattermost.${SHARINGIO_PAIR_BASE_DNS_NAME} # Set to your hostname, e.g. example.mattermost-example.com. Required when using an Ingress controller. Ignored if useServiceLoadBalancer is true. + useIngressTLS: true + database: + external: + secret: "mattermost-database" # Set to the name of a Kubernetes secret that contains the password to your external MySQL database. 
MySQL username must be "root". + fileStore: + operatorManaged: + storageSize: 10Gi # Set the file storage size to be used by Minio. + replicas: 4 + elasticSearch: + host: "" # Hostname Elasticsearch can be accessed at. + username: "" # Username to log into Elasticsearch. + password: "" # Password to log into Elasticsearch. + scheduling: + nodeSelector: {} # See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector. + affinity: {} # See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity. diff --git a/ii/mattermost-ii-coop/mattermost-operator.yaml b/ii/mattermost-ii-coop/mattermost-operator.yaml new file mode 100644 index 0000000..c5324b0 --- /dev/null +++ b/ii/mattermost-ii-coop/mattermost-operator.yaml @@ -0,0 +1,4469 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: clusterinstallations.mattermost.com +spec: + group: mattermost.com + names: + kind: ClusterInstallation + listKind: ClusterInstallationList + plural: clusterinstallations + singular: clusterinstallation + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: State of Mattermost + jsonPath: .status.state + name: State + type: string + - description: Image of Mattermost + jsonPath: .status.image + name: Image + type: string + - description: Version of Mattermost + jsonPath: .status.version + name: Version + type: string + - description: Endpoint + jsonPath: .status.endpoint + name: Endpoint + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterInstallation is the Schema for the clusterinstallations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Mattermost + cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status' + properties: + affinity: + description: If specified, affinity will define the pod's scheduling + constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. 
+ properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". 
+ The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. 
If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. 
When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + blueGreen: + description: BlueGreen defines the configuration of BlueGreen deployment + for a ClusterInstallation + properties: + blue: + description: Blue defines the blue deployment. + properties: + image: + description: Image defines the base Docker image that will + be used for the deployment. Required when BlueGreen or Canary + is enabled. + type: string + ingressName: + description: IngressName defines the ingress name that will + be used by the deployment. This option is not used for Canary + builds. + type: string + name: + description: Name defines the name of the deployment + type: string + resourceLabels: + additionalProperties: + type: string + type: object + version: + description: Version defines the Docker image version that + will be used for the deployment. Required when BlueGreen + or Canary is enabled. + type: string + type: object + enable: + description: Enable defines if BlueGreen deployment will be applied. + type: boolean + green: + description: Green defines the green deployment. + properties: + image: + description: Image defines the base Docker image that will + be used for the deployment. Required when BlueGreen or Canary + is enabled. 
+ type: string + ingressName: + description: IngressName defines the ingress name that will + be used by the deployment. This option is not used for Canary + builds. + type: string + name: + description: Name defines the name of the deployment + type: string + resourceLabels: + additionalProperties: + type: string + type: object + version: + description: Version defines the Docker image version that + will be used for the deployment. Required when BlueGreen + or Canary is enabled. + type: string + type: object + productionDeployment: + description: ProductionDeployment defines if the current production + is blue or green. + type: string + type: object + canary: + description: Canary defines the configuration of Canary deployment + for a ClusterInstallation + properties: + deployment: + description: Deployment defines the canary deployment. + properties: + image: + description: Image defines the base Docker image that will + be used for the deployment. Required when BlueGreen or Canary + is enabled. + type: string + ingressName: + description: IngressName defines the ingress name that will + be used by the deployment. This option is not used for Canary + builds. + type: string + name: + description: Name defines the name of the deployment + type: string + resourceLabels: + additionalProperties: + type: string + type: object + version: + description: Version defines the Docker image version that + will be used for the deployment. Required when BlueGreen + or Canary is enabled. + type: string + type: object + enable: + description: Enable defines if a canary build will be deployed. + type: boolean + type: object + database: + description: Database defines the database configuration for a ClusterInstallation. + properties: + backupRemoteDeletePolicy: + description: Defines the backup retention policy. + type: string + backupRestoreSecretName: + description: Defines the secret to be used when performing a database + restore. 
+ type: string + backupSchedule: + description: Defines the interval for backups in cron expression + format. + type: string + backupSecretName: + description: Defines the secret to be used for uploading/restoring + backup. + type: string + backupURL: + description: Defines the object storage url for uploading backups. + type: string + initBucketURL: + description: Defines the AWS S3 bucket where the Database Backup + is stored. The operator will download the file to restore the + data. + type: string + replicas: + description: Defines the number of database replicas. For redundancy + use at least 2 replicas. Setting this will override the number + of replicas set by 'Size'. + format: int32 + type: integer + resources: + description: Defines the resource requests and limits for the + database pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + secret: + description: "Optionally enter the name of an already-existing + Secret for connecting to the database. 
This secret should be + configured as follows: \n User-Managed Database - Key: DB_CONNECTION_STRING + | Value: Operator-Managed + Database - Key: ROOT_PASSWORD | Value: + \ - Key: USER | Value: - Key: PASSWORD | Value: + - Key: DATABASE Value: \n + Notes: If you define all secret values for both User-Managed + and Operator-Managed database types, the User-Managed connection + string will take precedence and the Operator-Managed values + will be ignored. If the secret is left blank, the default + behavior is to use an Operator-Managed database with strong + randomly-generated database credentials." + type: string + storageSize: + description: Defines the storage size for the database. ie 50Gi + pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + type: string + type: + description: Defines the type of database to use for an Operator-Managed + database. This value is ignored when using a User-Managed database. + type: string + type: object + elasticSearch: + description: ElasticSearch defines the ElasticSearch configuration + for a ClusterInstallation. + properties: + host: + type: string + password: + type: string + username: + type: string + type: object + image: + description: Image defines the ClusterInstallation Docker image. + type: string + imagePullPolicy: + description: Specify deployment pull policy. + type: string + ingressAnnotations: + additionalProperties: + type: string + type: object + ingressName: + description: IngressName defines the name to be used when creating + the ingress rules + type: string + livenessProbe: + description: Defines the probe to check if the application is up and + running. + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. 
The command is + simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod + IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes a custom header to be + used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults + to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. + TCP hooks not yet supported TODO: implement a realistic TCP + lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + mattermostEnv: + description: Optional environment variables to set in the Mattermost + application pods. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + mattermostLicenseSecret: + description: Secret that contains the mattermost license + type: string + migrate: + description: 'Migrate specifies that the ClusterInstallation CR should + be migrated to the Mattermost CR. CAUTION: Some features like BlueGreen + or Canary are not supported with a new Custom Resource therefore + migration should be performed with extra caution.' + type: boolean + minio: + description: Minio defines the configuration of Minio for a ClusterInstallation. + properties: + externalBucket: + description: Set to the bucket name of your external MinIO or + S3. + type: string + externalURL: + description: Set to use an external MinIO deployment or S3. Must + also set 'Secret' and 'ExternalBucket'. + type: string + replicas: + description: 'Defines the number of Minio replicas. Supply 1 to + run Minio in standalone mode with no redundancy. Supply 4 or + more to run Minio in distributed mode. Note that it is not possible + to upgrade Minio from standalone to distributed mode. Setting + this will override the number of replicas set by ''Size''. More + info: https://docs.min.io/docs/distributed-minio-quickstart-guide.html' + format: int32 + type: integer + resources: + description: Defines the resource requests and limits for the + Minio pods. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + secret: + description: 'Optionally enter the name of already existing secret. + Secret should have two values: "accesskey" and "secretkey". + Required when "ExternalURL" is set.' + type: string + storageSize: + description: Defines the storage size for Minio. ie 50Gi + pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + type: string + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for the + pod to fit on a node. Selector which must match a node''s labels + for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + readinessProbe: + description: Defines the probe to check if the application is ready + to accept traffic. + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command is + simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. Minimum + value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod + IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows + repeated headers. + items: + description: HTTPHeader describes a custom header to be + used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults + to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. + TCP hooks not yet supported TODO: implement a realistic TCP + lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. + Number must be in the range 1 to 65535. Name must be an + IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + replicas: + description: Replicas defines the number of replicas to use for the + Mattermost app servers. Setting this will override the number of + replicas set by 'Size'. + format: int32 + type: integer + resourceLabels: + additionalProperties: + type: string + type: object + resources: + description: Defines the resource requests and limits for the Mattermost + app server pods. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceAnnotations: + additionalProperties: + type: string + type: object + size: + description: 'Size defines the size of the ClusterInstallation. This + is typically specified in number of users. This will override replica + and resource requests/limits appropriately for the provided number + of users. This is a write-only field - its value is erased after + setting appropriate values of resources. Accepted values are: 100users, + 1000users, 5000users, 10000users, 250000users. If replicas and resource + requests/limits are not specified, and Size is not provided the + configuration for 5000users will be applied. Setting ''Replicas'', + ''Resources'', ''Minio.Replicas'', ''Minio.Resource'', ''Database.Replicas'', + or ''Database.Resources'' will override the values set by Size. + Setting new Size will override previous values regardless if set + by Size or manually.' 
+ type: string + useIngressTLS: + type: boolean + useServiceLoadBalancer: + type: boolean + version: + description: Version defines the ClusterInstallation Docker image + version. + type: string + required: + - ingressName + type: object + status: + description: 'Most recent observed status of the Mattermost cluster. Read-only. + Not included when requesting from the apiserver, only from the Mattermost + Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status' + properties: + blueName: + description: The name of the blue deployment in BlueGreen + type: string + endpoint: + description: The endpoint to access the Mattermost instance + type: string + greenName: + description: The name of the green deployment in BlueGreen + type: string + image: + description: The image running on the pods in the Mattermost instance + type: string + migration: + description: The status of migration to Mattermost CR. + properties: + error: + type: string + status: + type: string + type: object + replicas: + description: Total number of non-terminated pods targeted by this + Mattermost deployment + format: int32 + type: integer + state: + description: Represents the running state of the Mattermost instance + type: string + updatedReplicas: + description: Total number of non-terminated pods targeted by this + Mattermost deployment that are running with the desired image. 
+ format: int32 + type: integer + version: + description: The version currently running in the Mattermost instance + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: mattermostrestoredbs.mattermost.com +spec: + group: mattermost.com + names: + kind: MattermostRestoreDB + listKind: MattermostRestoreDBList + plural: mattermostrestoredbs + singular: mattermostrestoredb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: State of Mattermost DB Restore + jsonPath: .status.state + name: State + type: string + - description: Original DB Replicas + jsonPath: .status.originalDBReplicas + name: Original DB Replicas + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: MattermostRestoreDB is the Schema for the mattermostrestoredbs + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MattermostRestoreDBSpec defines the desired state of MattermostRestoreDB + properties: + initBucketURL: + description: InitBucketURL defines where the DB backup file is located. + type: string + mattermostClusterName: + description: MattermostClusterName defines the ClusterInstallation + name. + type: string + mattermostDBName: + description: MattermostDBName defines the database name. Need to set + if different from `mattermost`. + type: string + mattermostDBPassword: + description: MattermostDBPassword defines the user password to access + the database. Need to set if the user is different from the one + created by the operator. + type: string + mattermostDBUser: + description: MattermostDBUser defines the user to access the database. + Need to set if the user is different from `mmuser`. + type: string + restoreSecret: + description: RestoreSecret defines the secret that holds the credentials + to MySQL Operator be able to download the DB backup file + type: string + type: object + status: + description: MattermostRestoreDBStatus defines the observed state of MattermostRestoreDB + properties: + originalDBReplicas: + description: The original number of database replicas. will be used + to restore after applying the db restore process. + format: int32 + type: integer + state: + description: Represents the state of the Mattermost restore Database. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: mattermosts.installation.mattermost.com +spec: + group: installation.mattermost.com + names: + kind: Mattermost + listKind: MattermostList + plural: mattermosts + shortNames: + - mm + singular: mattermost + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: State of Mattermost + jsonPath: .status.state + name: State + type: string + - description: Image of Mattermost + jsonPath: .status.image + name: Image + type: string + - description: Version of Mattermost + jsonPath: .status.version + name: Version + type: string + - description: Endpoint + jsonPath: .status.endpoint + name: Endpoint + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: Mattermost is the Schema for the mattermosts API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MattermostSpec defines the desired state of Mattermost + properties: + database: + description: External Services + properties: + external: + description: Defines the configuration of and external database. + properties: + secret: + description: 'Secret contains data necessary to connect to + the external database. The Kubernetes Secret should contain: - + Key: DB_CONNECTION_STRING | Value: Full database connection + string. It can also contain optional fields, such as: - + Key: MM_SQLSETTINGS_DATASOURCEREPLICAS | Value: Connection + string to read replicas of the database. - Key: DB_CONNECTION_CHECK_URL + | Value: The URL used for checking that the database is + accessible.' + type: string + type: object + operatorManaged: + description: Defines the configuration of database managed by + Kubernetes operator. + properties: + backupRemoteDeletePolicy: + description: Defines the backup retention policy. + type: string + backupRestoreSecretName: + description: Defines the secret to be used when performing + a database restore. + type: string + backupSchedule: + description: Defines the interval for backups in cron expression + format. + type: string + backupSecretName: + description: Defines the secret to be used for uploading/restoring + backup. + type: string + backupURL: + description: Defines the object storage url for uploading + backups. + type: string + initBucketURL: + description: Defines the AWS S3 bucket where the Database + Backup is stored. The operator will download the file to + restore the data. + type: string + replicas: + description: Defines the number of database replicas. For + redundancy use at least 2 replicas. Setting this will override + the number of replicas set by 'Size'. 
+ format: int32 + type: integer + resources: + description: Defines the resource requests and limits for + the database pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storageSize: + description: Defines the storage size for the database. ie + 50Gi + pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + type: string + type: + description: Defines the type of database to use for an Operator-Managed + database. + type: string + type: object + type: object + elasticSearch: + description: ElasticSearch defines the ElasticSearch configuration + for Mattermost. + properties: + host: + type: string + password: + type: string + username: + type: string + type: object + fileStore: + description: FileStore defines the file store configuration for Mattermost. + properties: + external: + description: Defines the configuration of an external file store. + properties: + bucket: + description: Set to the bucket name of your external MinIO + or S3. 
+ type: string + secret: + description: 'Optionally enter the name of already existing + secret. Secret should have two values: "accesskey" and "secretkey".' + type: string + url: + description: Set to use an external MinIO deployment or S3. + type: string + type: object + operatorManaged: + description: Defines the configuration of file store managed by + Kubernetes operator. + properties: + replicas: + description: 'Defines the number of Minio replicas. Supply + 1 to run Minio in standalone mode with no redundancy. Supply + 4 or more to run Minio in distributed mode. Note that it + is not possible to upgrade Minio from standalone to distributed + mode. Setting this will override the number of replicas + set by ''Size''. More info: https://docs.min.io/docs/distributed-minio-quickstart-guide.html' + format: int32 + type: integer + resources: + description: Defines the resource requests and limits for + the Minio pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storageSize: + description: Defines the storage size for Minio. ie 50Gi + pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ + type: string + type: object + type: object + image: + description: Image defines the Mattermost Docker image. + type: string + imagePullPolicy: + description: Specify Mattermost deployment pull policy. + type: string + imagePullSecrets: + description: Specify Mattermost image pull secrets. + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + ingressAnnotations: + additionalProperties: + type: string + type: object + ingressName: + description: IngressName defines the name to be used when creating + the ingress rules + type: string + licenseSecret: + description: LicenseSecret is the name of the secret containing a + Mattermost license. + type: string + mattermostEnv: + description: Optional environment variables to set in the Mattermost + application pods. + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + probes: + description: Probes defines configuration of liveness and readiness + probe for Mattermost pods. These settings generally don't need to + be changed. + properties: + livenessProbe: + description: Defines the probe to check if the application is + up and running. + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + readinessProbe: + description: Defines the probe to check if the application is + ready to accept traffic. + properties: + exec: + description: One and only one of the following should be specified. + Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. 
You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP + port. TCP hooks not yet supported TODO: implement a realistic + TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. 
Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + type: object + replicas: + description: Replicas defines the number of replicas to use for the + Mattermost app servers. + format: int32 + type: integer + resourceLabels: + additionalProperties: + type: string + type: object + scheduling: + description: Scheduling defines the configuration related to scheduling + of the Mattermost pods as well as resource constraints. These settings + generally don't need to be changed. + properties: + affinity: + description: If specified, affinity will define the pod's scheduling + constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). 
+ properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from + its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them are + ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. If the + operator is Gt or Lt, the values array + must have a single element, which will + be interpreted as an integer. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. 
for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". 
The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node that + violates one or more of the expressions. The node that + is most preferred is the one with the greatest sum of + weights, i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + anti-affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. 
If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod + label update), the system may or may not try to eventually + evict the pod from its node. 
When there are multiple + elements, the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true for + the pod to fit on a node. Selector which must match a node''s + labels for the pod to be scheduled on that node. More info: + https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + resources: + description: Defines the resource requests and limits for the + Mattermost app server pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + type: object + serviceAnnotations: + additionalProperties: + type: string + type: object + size: + description: 'Size defines the size of the Mattermost. This is typically + specified in number of users. This will override replica and resource + requests/limits appropriately for the provided number of users. + This is a write-only field - its value is erased after setting appropriate + values of resources. Accepted values are: 100users, 1000users, 5000users, + 10000users, and 250000users. If replicas and resource requests/limits + are not specified, and Size is not provided the configuration for + 5000users will be applied. Setting ''Replicas'', ''Scheduling.Resources'', + ''FileStore.Replicas'', ''FileStore.Resource'', ''Database.Replicas'', + or ''Database.Resources'' will override the values set by Size. + Setting new Size will override previous values regardless if set + by Size or manually.' + type: string + useIngressTLS: + type: boolean + useServiceLoadBalancer: + type: boolean + version: + description: Version defines the Mattermost Docker image version. + type: string + volumeMounts: + description: Defines additional volumeMounts to add to Mattermost + application pods. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When + not set, MountPropagationNone is used. This field is beta + in 1.10. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the + container's volume should be mounted. Behaves similarly to + SubPath but environment variable references $(VAR_NAME) are + expanded using the container's environment. Defaults to "" + (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows for mounting volumes from various sources + into the Mattermost application pods. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". 
Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. 
+ type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "Ephemeral represents a volume that is handled + by a cluster storage driver (Alpha feature). 
The volume's + lifecycle is tied to the pod that defines it - it will be + created before the pod starts, and deleted when the pod is + removed. \n Use this if: a) the volume is only needed while + the pod runs, b) features of normal volumes like restoring + from snapshot or capacity tracking are needed, c) the storage + driver is specified through a storage class, and d) the storage + driver supports dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. 
+ Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) * An + existing custom resource that implements data + population (Alpha) In order to use custom resource + types that implement data population, the AnyVolumeDataSource + feature gate must be enabled. If the provisioner + or an external controller can support the specified + data source, it will create a new volume based + on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. 
Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface <target portal>:<volume name> will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. 
+ format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits used to set permissions on created + files by default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal values + for mode bits. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. 
If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. 
Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' 
+ format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. 
+ type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - ingressName + type: object + status: + description: MattermostStatus defines the observed state of Mattermost + properties: + endpoint: + description: The endpoint to access the Mattermost instance + type: string + image: + description: The image running on the pods in the Mattermost instance + type: string + replicas: + description: Total number of non-terminated pods targeted by this + Mattermost deployment + format: int32 + type: integer + state: + description: Represents the running state of the Mattermost instance + type: string + updatedReplicas: + description: Total number of non-terminated pods targeted by this + Mattermost deployment that are running with the desired image. + format: int32 + type: integer + version: + description: The version currently running in the Mattermost instance + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mattermost-operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: mattermost-operator +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - '*' +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - networking.k8s.io 
+ resources: + - ingresses + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - get + - create + - list + - delete + - watch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - apps + resourceNames: + - mattermost-operator + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - mattermost.com + resources: + - '*' + - clusterinstallations + - mattermostrestoredbs + verbs: + - '*' +- apiGroups: + - installation.mattermost.com + resources: + - '*' + verbs: + - '*' +- apiGroups: + - mysql.presslabs.org + resources: + - mysqlbackups + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - mysql.presslabs.org + resources: + - mysqlclusters + - mysqlclusters/status + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - miniocontroller.min.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - minio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - create + - list + - delete + - watch + - update +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + - certificatesigningrequests/approval + - certificatesigningrequests/status + verbs: + - update + - create + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: mattermost-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mattermost-operator +subjects: +- kind: ServiceAccount + name: mattermost-operator + namespace: mattermost-operator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mattermost-operator +spec: + replicas: 1 + selector: + matchLabels: + name: mattermost-operator + template: + metadata: + labels: + name: mattermost-operator + spec: + containers: + - args: + - --enable-leader-election + 
command: + - /mattermost-operator + env: + - name: MAX_RECONCILING_INSTALLATIONS + value: "20" + - name: REQUEUE_ON_LIMIT_DELAY + value: 20s + image: mattermost/mattermost-operator:v1.14.0 + imagePullPolicy: IfNotPresent + name: mattermost-operator + serviceAccountName: mattermost-operator +--- diff --git a/ii/mattermost-ii-coop/minio-operator.yaml b/ii/mattermost-ii-coop/minio-operator.yaml new file mode 100644 index 0000000..3e7df27 --- /dev/null +++ b/ii/mattermost-ii-coop/minio-operator.yaml @@ -0,0 +1,131 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: minioinstances.miniocontroller.min.io +spec: + group: miniocontroller.min.io + version: v1beta1 + scope: Namespaced + names: + kind: MinIOInstance + singular: minioinstance + plural: minioinstances + preserveUnknownFields: true + validation: + # openAPIV3Schema is the schema for validating custom objects. + # Refer https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#specifying-a-structural-schema + # for more details + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + replicas: + type: integer + minimum: 1 + maximum: 32 + version: + type: string + mountpath: + type: string + subpath: + type: string + additionalPrinterColumns: + - name: Replicas + type: integer + JSONPath: ".spec.replicas" +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: minio-operator-role +rules: +- apiGroups: + - "" + resources: + - namespaces + - secrets + - pods + - services + - events + verbs: + - get + - watch + - create + - list + - patch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - create + - list + - patch + - watch + - update +- apiGroups: + - "certificates.k8s.io" + resources: + - "certificatesigningrequests" + - "certificatesigningrequests/approval" + - "certificatesigningrequests/status" + verbs: + - update + - create + - get +- 
apiGroups: + - miniocontroller.min.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - min.io + resources: + - "*" + verbs: + - "*" +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: minio-operator-sa + namespace: minio-operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: minio-operator-binding + namespace: minio-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: minio-operator-role +subjects: +- kind: ServiceAccount + name: minio-operator-sa + namespace: minio-operator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio-operator + namespace: minio-operator +spec: + replicas: 1 + selector: + matchLabels: + name: minio-operator + template: + metadata: + labels: + name: minio-operator + spec: + serviceAccountName: minio-operator-sa + containers: + - name: minio-operator + image: minio/k8s-operator:1.0.7 + imagePullPolicy: IfNotPresent diff --git a/ii/mattermost-ii-coop/postgres-operator.yaml b/ii/mattermost-ii-coop/postgres-operator.yaml new file mode 100644 index 0000000..8e70003 --- /dev/null +++ b/ii/mattermost-ii-coop/postgres-operator.yaml @@ -0,0 +1,13 @@ +# Install Postgres-Operator +# #+NAME: Postgres operator + + apiVersion: helm.fluxcd.io/v1 + kind: HelmRelease + metadata: + name: postgres-operator + spec: + releaseName: postgres-operator + chart: + git: https://github.com/zalando/postgres-operator.git + ref: master + path: charts/postgres-operator diff --git a/ii/mattermost-ii-coop/postgresql.yaml b/ii/mattermost-ii-coop/postgresql.yaml new file mode 100644 index 0000000..f615326 --- /dev/null +++ b/ii/mattermost-ii-coop/postgresql.yaml @@ -0,0 +1,30 @@ +# Install Postgresql-HA +# #+name: postgres-database + +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: mattermost-db +spec: + enableConnectionPooler: true + connectionPooler: + mode: session + resources: + requests: + cpu: 250m + memory: 100Mi + limits: + cpu: "1" + 
+ memory: 100Mi + teamId: "mattermost" + volume: + size: 3Gi + numberOfInstances: 3 + users: + mattermost: # database owner + - superuser + - createdb + databases: + mattermost: mattermost # dbname: owner + postgresql: + version: "13" diff --git a/iimaginarium/data-center/planning.org b/iimaginarium/data-center/planning.org new file mode 100644 index 0000000..6b9737b --- /dev/null +++ b/iimaginarium/data-center/planning.org @@ -0,0 +1,56 @@ +#+TITLE: Data Center Planning +#+AUTHOR: Stephen Heywood +#+DATE: 8 January, 2020 +#+TODO: TODO(t) NEXT(n) IN_PROGRESS(i) BLOCKED(b) | TADA(d) +#+STARTUP: showeverything + + +* Goals + +To set up a local data center here in the iimaginarium using the current set of blade servers. The current timeline is to have the data center functional within 3 months. + +* Schedule: Hardware +** Environment + +- [ ] Review the physical space for the data center and how it can provide easy access to maintain all the equipment +- [ ] How well can the current space cope with extra equipment? +- [ ] Review the temperature inside of the data center space and see how it can be managed. Will it require an A/C unit? +- [ ] Source a pair of temperature sensors that could be used by a Pi to monitor the ambient temperature of the space +- [ ] Document the physical layout + +** Power + +- [ ] Get the Eaton UPS serviced +- [ ] Source a link cable to the Eaton 5PX extended battery module +- [ ] Document the UPS connections. Include the connections on the main switchboard + +** Network + +- [ ] Reset the Juniper switch to factory defaults +- [ ] Confirm the connection between the internet, data-center and current iimaginarium network +- [ ] Confirm the connection plans for each of the IPMI NICs on the blades +- [ ] Document both the logical and physical networks + +** Blades +*** Audit + +- [ ] Check the health of each blade +- [ ] Repair the left power supply module case. 
The bottom of the case is bent, which is stopping the fan +- [ ] Document each blade + +*** Storage + +- [ ] Check what storage each blade currently has +- [ ] Check what initial storage resources are required to meet the data center goals + +*** Network + +- [ ] For each blade, test IPMI access +- [ ] For each blade, test each network port + +* Schedule: Software +** Operating Systems +** Kubernetes +** Applications +** Services +** Client Requirements diff --git a/iimaginarium/data-center/resources.org b/iimaginarium/data-center/resources.org new file mode 100644 index 0000000..bafc7d1 --- /dev/null +++ b/iimaginarium/data-center/resources.org @@ -0,0 +1,49 @@ +#+TITLE: Data Center Resources +#+AUTHOR: Stephen Heywood +#+DATE: 8 January, 2020 +#+STARTUP: showeverything + + +* Hardware +** Blade Enclosure + +- Chassis: CSE-938 + +** Blade Servers + +- Model: [[https://www.supermicro.com/products/motherboard/Xeon/C600/X9SRD-F.cfm][SuperMicro X9SRD-F]] +- Quantity: 8 + +** UPS + +- Model: [[https://www.eaton.com/nz/en-gb/catalog/backup-power-ups-surge-it-power-distribution/eaton-5px-ups.html][Eaton 5PX 3000]] + +** Network +*** Juniper Switch + +- Model: Juniper EX2200 + +*** D-Link + +- Model: + +* Software +** Operating System + +- [[https://ubuntu.com/server/docs][Ubuntu Server Documentation]] + +** Kubernetes + +- [[https://kubernetes.io/][kubernetes.io]] +- [[https://rook.io/][rook.io]] + +** Ceph (storage) + +- [[https://ceph.io/][ceph.io]] +- [[https://docs.ceph.com/docs/master/architecture/][Architecture]] +- [[https://ceph.io/install/][Install]] + +** DevOps + +- [[https://www.ansible.com/][ansible.com]] +- [[https://docs.ansible.com/][Ansible Documentation]] diff --git a/k8s.io/kubernetes/k8s.io/cloud-shell.org b/k8s.io/kubernetes/k8s.io/cloud-shell.org index fec3226..4f2207d 100644 --- a/k8s.io/kubernetes/k8s.io/cloud-shell.org +++ b/k8s.io/kubernetes/k8s.io/cloud-shell.org @@ -247,6 +247,5 @@ Some of these layers are 2GB, and quite a few are over 1GB. 
# eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/k8s.io/kubernetes/k8s.io/dns/add-canary.org b/k8s.io/kubernetes/k8s.io/dns/add-canary.org index de2b85d..710a761 100644 --- a/k8s.io/kubernetes/k8s.io/dns/add-canary.org +++ b/k8s.io/kubernetes/k8s.io/dns/add-canary.org @@ -270,5 +270,4 @@ There were quite a few definitions dropped, but again this shouldn't affect test # eval: (setq socket (concat "/tmp/" user-login-name "." (file-name-base buffer-file-name) ".iisocket")) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # End: diff --git a/k8s.io/kubernetes/k8s.io/iam.dump.org b/k8s.io/kubernetes/k8s.io/iam.dump.org index 4863516..507a719 100644 --- a/k8s.io/kubernetes/k8s.io/iam.dump.org +++ b/k8s.io/kubernetes/k8s.io/iam.dump.org @@ -286,6 +286,5 @@ identify each resource, them dump iam # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/k8s.io/kubernetes/k8s.io/image-promoter.org b/k8s.io/kubernetes/k8s.io/image-promoter.org index fe2d69c..db7c5ad 100644 --- a/k8s.io/kubernetes/k8s.io/image-promoter.org +++ b/k8s.io/kubernetes/k8s.io/image-promoter.org @@ -121,6 +121,5 @@ https://github.com/kubernetes/test-infra/pull/11414 # eval: (xclip-mode 1) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/k8s.io/kubernetes/kind.org b/k8s.io/kubernetes/kind.org index 02e952b..50eb4ee 100644 --- a/k8s.io/kubernetes/kind.org +++ b/k8s.io/kubernetes/kind.org @@ -144,6 +144,5 @@ Beware your copy paste methods and try pasting into a notepad first. # eval: (setq socket (concat "/tmp/" user-login-name "." 
(file-name-base buffer-file-name) ".iisocket")) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/k8s.io/kubernetes/kubeadm-dind.org b/k8s.io/kubernetes/kubeadm-dind.org index b9ef832..a931e76 100644 --- a/k8s.io/kubernetes/kubeadm-dind.org +++ b/k8s.io/kubernetes/kubeadm-dind.org @@ -792,7 +792,5 @@ echo foo # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# org-confirm-babel-evaluate: nil -# org-babel-tmux-session-prefix: "hh-" # End: diff --git a/k8s.io/kubernetes/kubectl.org b/k8s.io/kubernetes/kubectl.org index 6ff674c..26e30c4 100644 --- a/k8s.io/kubernetes/kubectl.org +++ b/k8s.io/kubernetes/kubectl.org @@ -262,7 +262,6 @@ export KUBECONFIG=~/.kube/config #or elsewhere # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # org-export-use-babel: nil # End: diff --git a/k8s.io/kubernetes/kubetest-dind.org b/k8s.io/kubernetes/kubetest-dind.org index 18a13e7..cedd1a3 100644 --- a/k8s.io/kubernetes/kubetest-dind.org +++ b/k8s.io/kubernetes/kubetest-dind.org @@ -815,5 +815,4 @@ kubeadm config migrate --new-config kubeadm.conf --old-config kubeadm.conf.orig # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# org-confirm-babel-evaluate: nil # End: diff --git a/k8s.io/kubernetes/kubetest-gke.org b/k8s.io/kubernetes/kubetest-gke.org index 67e8789..6ef9692 100644 --- a/k8s.io/kubernetes/kubetest-gke.org +++ b/k8s.io/kubernetes/kubetest-gke.org @@ -60,5 +60,4 @@ Skeleton Provider: prepare-e2e not implemented # eval: (require (quote ob-emacs-lisp)) # eval: (require (quote ob-js)) # eval: (require (quote ob-go)) -# org-confirm-babel-evaluate: nil # End: diff --git a/k8s.io/kubernetes/packet-setup.org 
b/k8s.io/kubernetes/packet-setup.org index 5f8bbe6..63c3d51 100644 --- a/k8s.io/kubernetes/packet-setup.org +++ b/k8s.io/kubernetes/packet-setup.org @@ -1030,6 +1030,5 @@ tmate -S /tmp/kind.kind-ci-box.iisocket new-session -A -s kind -n main \ # eval: (xclip-mode 0) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/learning/.dir-locals.el b/learning/.dir-locals.el index 5390173..89bfae0 100644 --- a/learning/.dir-locals.el +++ b/learning/.dir-locals.el @@ -1,371 +1,371 @@ -;;; Directory Local Variables -;;; For more information see (info "(emacs) Directory Variables") -( - (org-mode - (org-babel-tmate-session-prefix . "") - (org-babel-tmate-default-window-name . "main") - (org-confirm-babel-evaluate . nil) - (org-use-property-inheritance . t) - (org-file-dir . (file-name-directory buffer-file-name)) - (eval - . - (progn - ;; (let ((socket-arg (concat ":socket " "FLOOPIE" )))) - ;; (set (make-local-variable 'tmpdir) - ;; (make-temp-file (concat "/dev/shm/" user-buffer "-") t)) - (set (make-local-variable 'ssh-user) - "pair") - ;; user-login-name) - ;; might be nice to set this as a global property in the org file - (set (make-local-variable 'ssh-host) - "ii.cncf.ci") - (set (make-local-variable 'ssh-user-host) - (concat ssh-user "@" ssh-host)) - (set (make-local-variable 'time-stamp-zone) - "Pacific/Auckland") - (set (make-local-variable 'time-stamp-pattern) - ;; https://www.emacswiki.org/emacs/TimeStamp - "10/#+UPDATED: needs time-local formatted regexp") - (set (make-local-variable 'user-buffer) - (concat user-login-name "." 
(file-name-base load-file-name))) - (set (make-local-variable 'socket) - (concat "/tmp/" user-buffer ".target.iisocket")) - (set (make-local-variable 'socket-param) - (concat ":sockets " socket)) - (set (make-local-variable 'item-str) - "(nth 4 (org-heading-components))") - (set (make-local-variable 'togetherly-port) - (+ (random 60000) 1024)) - (set (make-local-variable 'org-file-properties) - (list - (cons 'header-args:tmate - (concat - ":noweb yes" - " :eval never-export" - " :noweb-ref " item-str - " :comments org" - " :results silent " - " :session (concat user-login-name \":main\" )" - ;; " :session (concat user-login-name \":\" " "main" ")" - ;; " :session (concat user-login-name \":\" " item-str ")" - " :socket " socket - " :window " user-login-name - " :terminal sakura" - " :exports code" - ;; If you want each tmate command to run from a particular directory - ;; " :prologue (concat \"cd \" ssh-dir \"\n\")" - ;; " :prologue (concat "cd " org-file-dir "\n") )) - )) - (cons 'header-args:go - (concat - ":noweb yes" - ;; " :noweb-ref " item-str - " :comments org" - " :eval no-export" - " :results output code verbatim replace" - " :exports both" - " :wrap EXPORT text" - )) - (cons 'header-args:emacs-lisp - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval no-export" - " :results replace code" - " :exports both" - )) - (cons 'header-args:elisp - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval no-export" - " :results code" - " :exports both" - )) - (cons 'header-args:bash - (concat - ":noweb yes" - " :noweb-ref " item-str - " :comments org" - " :eval no-export" - " :results output code verbatim replace" - " :exports both" - " :wrap EXAMPLE" - ;; This can help catch stderr and other issues - ;; " :prologue \"exec 2>&1\n\"" - ;; " :epilogue \":\n\"" - ;; " :prologue exec 2>&1\n(\n" - ;; " :epilogue )\n:\n" - ;; If you want commands executing over tramp - ;; " :dir (symbol-value 'tmpdir)" - ;; " :dir (concat 
\"ssh:\" ssh-user \"@\" ssh-host \":~\"" - ;; " :dir (concat \"ssh:\" ssh-user \"@\" ssh-host \":~\"" - ;; If you want to feed an application via HEREDOC - ;; :PROPERTIES: - ;; " :prologue exec 2>&1\nbq query -n 2000 --nouse_legacy_sql </dev/null " - "; echo Share the above with your friends and hit enter when done. " - "; read " - "; bash --login\"" - ) - ) - (set (make-local-variable 'start-tmate-for-togetherly-client) - (let ( - (togetherly-socket (make-temp-file (concat "/tmp/" user-buffer "-"))) - ) - (concat - "tmate -S " - togetherly-socket - " new-session -A -s " - user-login-name - " -n main " - "\"tmate wait tmate-ready " - "&& TMATE_CONNECT=\\$(" - "tmate display -p '#{tmate_ssh} # " - user-buffer - "." - togetherly-socket - ".TOGETHERLY # " - ;; would like this to be shorter - (concat - (format-time-string "%Y-%m-%d %T") - (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) - " # #{tmate_web} ') " - "; echo \\$TMATE_CONNECT " - "; (echo \\$TMATE_CONNECT | xclip -i -sel p -f | xclip -i -sel c ) 2>/dev/null " - "; echo Share this url with someone both be able to togethrly the same buffer. 
" - "; read " - "; emacs -nw --eval '\(togetherly-client-quick-start \"" (number-to-string togetherly-port) "\")'\"" - ) - ) - ) - ;; at some point we can bring back working on remote hosts - (set (make-local-variable 'start-tmate-over-ssh-command) - (concat - "tmate -S " - socket - " new-session -A -s " - user-login-name - " -n main " - "\"tmate wait tmate-ready " - "\\&\\& TMATE_CONNECT=\\$\\(" - "tmate display -p '#{tmate_ssh} # " - user-buffer - ".target # " - (concat - (format-time-string "%Y-%m-%d %T") - (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) - " #{tmate_web} '\\) " - "; echo \\$TMATE_CONNECT " - "; \\(echo \\$TMATE_CONNECT \\| xclip -i -sel p -f \\| xclip -i -sel c \\) 2>/dev/null " - "; echo Share the above with your friends and hit enter when done. " - "; read " - "; bash --login\"" - ) - ) - ;; # eval: (set (make-local-variable 'ssh-user-host) (concat ssh-user "@" ssh-host)) - ;; # eval: (set (make-local-variable 'start-tmate-over-ssh-command) (concat "tmate -S " socket " new-session -A -s " user-login-name " -n main \\\"tmate wait tmate-ready \\&\\& tmate display -p \\'#{tmate_ssh}\\' \\| xclip -i -sel p -f \\| xclip -i -sel c \\&\\& bash --login\\\"")) - ;; # eval: (set (make-local-variable 'start-tmate-locally-command) (concat "tmate -S " socket " new-session -A -s " user-login-name " -n main \\\"tmate wait tmate-ready \\&\\& tmate display -p \\'#{tmate_ssh}\\' \\| xclip -i -sel p -f \\| xclip -i -sel c \\&\\& bash --login\\\"")) - ;; # eval: (xclip-mode 1) - ;; # eval: (gui-select-text (concat "ssh -tAX " ssh-user-host " -L " socket ":" socket " " start-tmate-over-ssh-command)) - (defun togetherly-server-start-now () - "Start a Togetherly server with this buffer." 
- (interactive) - (cond ((null togetherly--server) - (let* ((addr "127.0.0.1") - (server-port togetherly-port) - (server-name user-login-name) - (server-proc (make-network-process - :name "togetherly-server" :server t - :service server-port :noquery t :host addr - :sentinel 'togetherly--server-sentinel-function - :filter 'togetherly--server-filter-function)) - (rcolor (car togetherly-region-colors)) - (pcolor (car togetherly-cursor-colors))) - (setq togetherly-region-colors (cdr togetherly-region-colors) - togetherly-cursor-colors (cdr togetherly-cursor-colors) - togetherly--server `(,server-proc ,server-name ,rcolor . ,pcolor) - togetherly--server-buffer (current-buffer) - togetherly--server-clients nil - togetherly--server-timer-object - (run-with-timer nil togetherly-cursor-sync-rate - 'togetherly--server-broadcast-cursor-positions)) - (set (make-local-variable 'header-line-format) - (concat " " (propertize server-name 'face `(:background ,pcolor))))) - (add-hook 'before-change-functions 'togetherly--server-before-change nil t) - (add-hook 'after-change-functions 'togetherly--server-after-change nil t) - (add-hook 'kill-buffer-query-functions 'togetherly--server-kill-buffer-query) - (populate-x-togetherly) ;; go ahead and create the tmate paste for the togetherly - ) - ((y-or-n-p "Togetherly server already started. Migrate to this buffer ? 
") - (set (make-local-variable 'header-line-format) - (buffer-local-value 'header-line-format togetherly--server-buffer)) - (add-hook 'before-change-functions 'togetherly--server-before-change nil t) - (add-hook 'after-change-functions 'togetherly--server-after-change nil t) - (with-current-buffer togetherly--server-buffer - (remove-hook 'before-change-functions 'togetherly--server-before-change t) - (remove-hook 'after-change-functions 'togetherly--server-after-change t) - (kill-local-variable 'header-line-format)) - (setq togetherly--server-buffer (current-buffer)) - (togetherly--server-broadcast `(welcome ,(togetherly--buffer-string) . ,major-mode)) - ) - (t - (message "Togetherly: Canceled.")))) - (defun populate-x-togetherly () - "Populate the clipboard with the command for a together client" - (interactive) - (message "Setting X Clipboard to contain the start-tmate command") - (xclip-mode 1) - (gui-select-text start-tmate-for-togetherly-client) - ) - (defun runs-and-exits-zero (program &rest args) - "Run PROGRAM with ARGS and return the exit code." - (with-temp-buffer - (if (= 0 (apply 'call-process program nil (current-buffer) nil args)) - 'true - )) - ) - (defun xclip-working () - "Quick Check to see if X is working." 
- (if (getenv "DISPLAY") - ;; this xset test is a bit flakey - ;; (if (runs-and-exits-zero "xset" "q") - ;; Using xclip to set an invalid selection is as lightly intrusive - ;; check I could come up with, and not overwriting anything - ;; however it seems to hang - ;; (if (runs-and-exits-zero "xclip" "-selection" "unused") - ;; 'true) - 'true - ;; ) - ) - ) - (defun populate-x-clipboard () - "Populate the X clipboard with the start-tmate-command" - (message "Setting X Clipboard to contain the start-tmate command") - (xclip-mode 1) - (gui-select-text start-tmate-command) - (xclip-mode 0) - (with-current-buffer (get-buffer-create "start-tmate-command") - (insert-for-yank "The following has been populated to your local X clipboard:\n") - ) - ) - ;; For testing / setting DISPLAY to something else - ;; (getenv "DISPLAY") - ;; (setenv "DISPLAY" ":0") - ;; As we start on other OSes, we'll need to copy this differently - (if (xclip-working) - (populate-x-clipboard) - (with-current-buffer (get-buffer-create "start-tmate-command" ) - (insert-for-yank "You will need to copy this manually:\n\n" ) - ) - ) - ;; needs to be global, so it's availabel to the other buffer - (setq tmate-command start-tmate-command) - (with-current-buffer (get-buffer-create "start-tmate-command") - (insert-for-yank - (concat "\nOpen another terminal on the same host and paste:\n\n" tmate-command) - )) - (switch-to-buffer "start-tmate-command") - (y-or-n-p "Have you Pasted?") - ;; https://www.wisdomandwonder.com/article/10630/how-fast-can-you-tangle-in-org-mode - (setq help/default-gc-cons-threshold gc-cons-threshold) - (defun help/set-gc-cons-threshold (&optional multiplier notify) - "Set `gc-cons-threshold' either to its default value or a - `multiplier' thereof." 
- (let* ((new-multiplier (or multiplier 1)) - (new-threshold (* help/default-gc-cons-threshold - new-multiplier))) - (setq gc-cons-threshold new-threshold) - (when notify (message "Setting `gc-cons-threshold' to %s" - new-threshold)))) - (defun help/double-gc-cons-threshold () "Double `gc-cons-threshold'." (help/set-gc-cons-threshold 2)) - (add-hook 'org-babel-pre-tangle-hook #'help/double-gc-cons-threshold) - (add-hook 'org-babel-post-tangle-hook #'help/set-gc-cons-threshold) - ;; info:org#Conflicts for org 9 and very recent yas - (defun yas/org-very-safe-expand () - (let ((yas/fallback-behavior 'return-nil)) (yas/expand))) +;; ;;; Directory Local Variables +;; ;;; For more information see (info "(emacs) Directory Variables") +;; ( +;; (org-mode +;; (org-babel-tmate-session-prefix . "") +;; (org-babel-tmate-default-window-name . "main") +;; (org-confirm-babel-evaluate . nil) +;; (org-use-property-inheritance . t) +;; (org-file-dir . (file-name-directory buffer-file-name)) +;; (eval +;; . +;; (progn +;; ;; (let ((socket-arg (concat ":socket " "FLOOPIE" )))) +;; ;; (set (make-local-variable 'tmpdir) +;; ;; (make-temp-file (concat "/dev/shm/" user-buffer "-") t)) +;; (set (make-local-variable 'ssh-user) +;; "pair") +;; ;; user-login-name) +;; ;; might be nice to set this as a global property in the org file +;; (set (make-local-variable 'ssh-host) +;; "ii.cncf.ci") +;; (set (make-local-variable 'ssh-user-host) +;; (concat ssh-user "@" ssh-host)) +;; (set (make-local-variable 'time-stamp-zone) +;; "Pacific/Auckland") +;; (set (make-local-variable 'time-stamp-pattern) +;; ;; https://www.emacswiki.org/emacs/TimeStamp +;; "10/#+UPDATED: needs time-local formatted regexp") +;; (set (make-local-variable 'user-buffer) +;; (concat user-login-name "." 
(file-name-base load-file-name))) +;; (set (make-local-variable 'socket) +;; (concat "/tmp/" user-buffer ".target.iisocket")) +;; (set (make-local-variable 'socket-param) +;; (concat ":sockets " socket)) +;; (set (make-local-variable 'item-str) +;; "(nth 4 (org-heading-components))") +;; (set (make-local-variable 'togetherly-port) +;; (+ (random 60000) 1024)) +;; (set (make-local-variable 'org-file-properties) +;; (list +;; (cons 'header-args:tmate +;; (concat +;; ":noweb yes" +;; " :eval never-export" +;; " :noweb-ref " item-str +;; " :comments org" +;; " :results silent " +;; " :session (concat user-login-name \":main\" )" +;; ;; " :session (concat user-login-name \":\" " "main" ")" +;; ;; " :session (concat user-login-name \":\" " item-str ")" +;; " :socket " socket +;; " :window " user-login-name +;; " :terminal sakura" +;; " :exports code" +;; ;; If you want each tmate command to run from a particular directory +;; ;; " :prologue (concat \"cd \" ssh-dir \"\n\")" +;; ;; " :prologue (concat "cd " org-file-dir "\n") )) +;; )) +;; (cons 'header-args:go +;; (concat +;; ":noweb yes" +;; ;; " :noweb-ref " item-str +;; " :comments org" +;; " :eval no-export" +;; " :results output code verbatim replace" +;; " :exports both" +;; " :wrap EXPORT text" +;; )) +;; (cons 'header-args:emacs-lisp +;; (concat +;; ":noweb yes" +;; " :noweb-ref " item-str +;; " :comments org" +;; " :eval no-export" +;; " :results replace code" +;; " :exports both" +;; )) +;; (cons 'header-args:elisp +;; (concat +;; ":noweb yes" +;; " :noweb-ref " item-str +;; " :comments org" +;; " :eval no-export" +;; " :results code" +;; " :exports both" +;; )) +;; (cons 'header-args:bash +;; (concat +;; ":noweb yes" +;; " :noweb-ref " item-str +;; " :comments org" +;; " :eval no-export" +;; " :results output code verbatim replace" +;; " :exports both" +;; " :wrap EXAMPLE" +;; ;; This can help catch stderr and other issues +;; ;; " :prologue \"exec 2>&1\n\"" +;; ;; " :epilogue \":\n\"" +;; ;; " :prologue exec 
2>&1\n(\n" +;; ;; " :epilogue )\n:\n" +;; ;; If you want commands executing over tramp +;; ;; " :dir (symbol-value 'tmpdir)" +;; ;; " :dir (concat \"ssh:\" ssh-user \"@\" ssh-host \":~\"" +;; ;; " :dir (concat \"ssh:\" ssh-user \"@\" ssh-host \":~\"" +;; ;; If you want to feed an application via HEREDOC +;; ;; :PROPERTIES: +;; ;; " :prologue exec 2>&1\nbq query -n 2000 --nouse_legacy_sql </dev/null " +;; "; echo Share the above with your friends and hit enter when done. " +;; "; read " +;; "; bash --login\"" +;; ) +;; ) +;; (set (make-local-variable 'start-tmate-for-togetherly-client) +;; (let ( +;; (togetherly-socket (make-temp-file (concat "/tmp/" user-buffer "-"))) +;; ) +;; (concat +;; "tmate -S " +;; togetherly-socket +;; " new-session -A -s " +;; user-login-name +;; " -n main " +;; "\"tmate wait tmate-ready " +;; "&& TMATE_CONNECT=\\$(" +;; "tmate display -p '#{tmate_ssh} # " +;; user-buffer +;; "." +;; togetherly-socket +;; ".TOGETHERLY # " +;; ;; would like this to be shorter +;; (concat +;; (format-time-string "%Y-%m-%d %T") +;; (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) +;; " # #{tmate_web} ') " +;; "; echo \\$TMATE_CONNECT " +;; "; (echo \\$TMATE_CONNECT | xclip -i -sel p -f | xclip -i -sel c ) 2>/dev/null " +;; "; echo Share this url with someone both be able to togethrly the same buffer. 
" +;; "; read " +;; "; emacs -nw --eval '\(togetherly-client-quick-start \"" (number-to-string togetherly-port) "\")'\"" +;; ) +;; ) +;; ) +;; ;; at some point we can bring back working on remote hosts +;; (set (make-local-variable 'start-tmate-over-ssh-command) +;; (concat +;; "tmate -S " +;; socket +;; " new-session -A -s " +;; user-login-name +;; " -n main " +;; "\"tmate wait tmate-ready " +;; "\\&\\& TMATE_CONNECT=\\$\\(" +;; "tmate display -p '#{tmate_ssh} # " +;; user-buffer +;; ".target # " +;; (concat +;; (format-time-string "%Y-%m-%d %T") +;; (funcall (lambda ($x) (format "%s:%s" (substring $x 0 3) (substring $x 3 5))) (format-time-string "%z"))) +;; " #{tmate_web} '\\) " +;; "; echo \\$TMATE_CONNECT " +;; "; \\(echo \\$TMATE_CONNECT \\| xclip -i -sel p -f \\| xclip -i -sel c \\) 2>/dev/null " +;; "; echo Share the above with your friends and hit enter when done. " +;; "; read " +;; "; bash --login\"" +;; ) +;; ) +;; ;; # eval: (set (make-local-variable 'ssh-user-host) (concat ssh-user "@" ssh-host)) +;; ;; # eval: (set (make-local-variable 'start-tmate-over-ssh-command) (concat "tmate -S " socket " new-session -A -s " user-login-name " -n main \\\"tmate wait tmate-ready \\&\\& tmate display -p \\'#{tmate_ssh}\\' \\| xclip -i -sel p -f \\| xclip -i -sel c \\&\\& bash --login\\\"")) +;; ;; # eval: (set (make-local-variable 'start-tmate-locally-command) (concat "tmate -S " socket " new-session -A -s " user-login-name " -n main \\\"tmate wait tmate-ready \\&\\& tmate display -p \\'#{tmate_ssh}\\' \\| xclip -i -sel p -f \\| xclip -i -sel c \\&\\& bash --login\\\"")) +;; ;; # eval: (xclip-mode 1) +;; ;; # eval: (gui-select-text (concat "ssh -tAX " ssh-user-host " -L " socket ":" socket " " start-tmate-over-ssh-command)) +;; (defun togetherly-server-start-now () +;; "Start a Togetherly server with this buffer." 
+;; (interactive) +;; (cond ((null togetherly--server) +;; (let* ((addr "127.0.0.1") +;; (server-port togetherly-port) +;; (server-name user-login-name) +;; (server-proc (make-network-process +;; :name "togetherly-server" :server t +;; :service server-port :noquery t :host addr +;; :sentinel 'togetherly--server-sentinel-function +;; :filter 'togetherly--server-filter-function)) +;; (rcolor (car togetherly-region-colors)) +;; (pcolor (car togetherly-cursor-colors))) +;; (setq togetherly-region-colors (cdr togetherly-region-colors) +;; togetherly-cursor-colors (cdr togetherly-cursor-colors) +;; togetherly--server `(,server-proc ,server-name ,rcolor . ,pcolor) +;; togetherly--server-buffer (current-buffer) +;; togetherly--server-clients nil +;; togetherly--server-timer-object +;; (run-with-timer nil togetherly-cursor-sync-rate +;; 'togetherly--server-broadcast-cursor-positions)) +;; (set (make-local-variable 'header-line-format) +;; (concat " " (propertize server-name 'face `(:background ,pcolor))))) +;; (add-hook 'before-change-functions 'togetherly--server-before-change nil t) +;; (add-hook 'after-change-functions 'togetherly--server-after-change nil t) +;; (add-hook 'kill-buffer-query-functions 'togetherly--server-kill-buffer-query) +;; (populate-x-togetherly) ;; go ahead and create the tmate paste for the togetherly +;; ) +;; ((y-or-n-p "Togetherly server already started. Migrate to this buffer ? 
") +;; (set (make-local-variable 'header-line-format) +;; (buffer-local-value 'header-line-format togetherly--server-buffer)) +;; (add-hook 'before-change-functions 'togetherly--server-before-change nil t) +;; (add-hook 'after-change-functions 'togetherly--server-after-change nil t) +;; (with-current-buffer togetherly--server-buffer +;; (remove-hook 'before-change-functions 'togetherly--server-before-change t) +;; (remove-hook 'after-change-functions 'togetherly--server-after-change t) +;; (kill-local-variable 'header-line-format)) +;; (setq togetherly--server-buffer (current-buffer)) +;; (togetherly--server-broadcast `(welcome ,(togetherly--buffer-string) . ,major-mode)) +;; ) +;; (t +;; (message "Togetherly: Canceled.")))) +;; (defun populate-x-togetherly () +;; "Populate the clipboard with the command for a together client" +;; (interactive) +;; (message "Setting X Clipboard to contain the start-tmate command") +;; (xclip-mode 1) +;; (gui-select-text start-tmate-for-togetherly-client) +;; ) +;; (defun runs-and-exits-zero (program &rest args) +;; "Run PROGRAM with ARGS and return the exit code." +;; (with-temp-buffer +;; (if (= 0 (apply 'call-process program nil (current-buffer) nil args)) +;; 'true +;; )) +;; ) +;; (defun xclip-working () +;; "Quick Check to see if X is working." 
+;; (if (getenv "DISPLAY") +;; ;; this xset test is a bit flakey +;; ;; (if (runs-and-exits-zero "xset" "q") +;; ;; Using xclip to set an invalid selection is as lightly intrusive +;; ;; check I could come up with, and not overwriting anything +;; ;; however it seems to hang +;; ;; (if (runs-and-exits-zero "xclip" "-selection" "unused") +;; ;; 'true) +;; 'true +;; ;; ) +;; ) +;; ) +;; (defun populate-x-clipboard () +;; "Populate the X clipboard with the start-tmate-command" +;; (message "Setting X Clipboard to contain the start-tmate command") +;; (xclip-mode 1) +;; (gui-select-text start-tmate-command) +;; (xclip-mode 0) +;; (with-current-buffer (get-buffer-create "start-tmate-command") +;; (insert-for-yank "The following has been populated to your local X clipboard:\n") +;; ) +;; ) +;; ;; For testing / setting DISPLAY to something else +;; ;; (getenv "DISPLAY") +;; ;; (setenv "DISPLAY" ":0") +;; ;; As we start on other OSes, we'll need to copy this differently +;; (if (xclip-working) +;; (populate-x-clipboard) +;; (with-current-buffer (get-buffer-create "start-tmate-command" ) +;; (insert-for-yank "You will need to copy this manually:\n\n" ) +;; ) +;; ) +;; ;; needs to be global, so it's availabel to the other buffer +;; (setq tmate-command start-tmate-command) +;; (with-current-buffer (get-buffer-create "start-tmate-command") +;; (insert-for-yank +;; (concat "\nOpen another terminal on the same host and paste:\n\n" tmate-command) +;; )) +;; (switch-to-buffer "start-tmate-command") +;; (y-or-n-p "Have you Pasted?") +;; ;; https://www.wisdomandwonder.com/article/10630/how-fast-can-you-tangle-in-org-mode +;; (setq help/default-gc-cons-threshold gc-cons-threshold) +;; (defun help/set-gc-cons-threshold (&optional multiplier notify) +;; "Set `gc-cons-threshold' either to its default value or a +;; `multiplier' thereof." 
+;; (let* ((new-multiplier (or multiplier 1)) +;; (new-threshold (* help/default-gc-cons-threshold +;; new-multiplier))) +;; (setq gc-cons-threshold new-threshold) +;; (when notify (message "Setting `gc-cons-threshold' to %s" +;; new-threshold)))) +;; (defun help/double-gc-cons-threshold () "Double `gc-cons-threshold'." (help/set-gc-cons-threshold 2)) +;; (add-hook 'org-babel-pre-tangle-hook #'help/double-gc-cons-threshold) +;; (add-hook 'org-babel-post-tangle-hook #'help/set-gc-cons-threshold) +;; ;; info:org#Conflicts for org 9 and very recent yas +;; (defun yas/org-very-safe-expand () +;; (let ((yas/fallback-behavior 'return-nil)) (yas/expand))) - (yas/expand) - (make-variable-buffer-local 'yas/trigger-key) - (setq yas/trigger-key [tab]) - (add-to-list 'org-tab-first-hook 'yas/org-very-safe-expand) - (define-key yas/keymap [tab] 'yas/next-field) - ;; (gui-select-text (concat "rm -fi " socket "; ssh -tAX " ssh-user "@" ssh-host " -L " socket ":" socket " " start-tmate-over-ssh-command)) - ;; (edebug-trace "TRACING socket:%S" socket) - ;; (edebug-trace "TRACING org-babel-header-args:tmate %S" org-babel-header-args:emacs-lisp) - ;; we could try and create a buffer / clear it on the fly - ;; ssh later? - ;; (with-current-buffer (get-buffer-create "start-tmate-command") - ;; (insert-for-yank - ;; (concat "\nOpen another terminal on the same host and paste:\n\n" tmate-command) - ;; )) - ;; FIXME! How do we find out what our local filname is? - ;; This was designed for dir-locals... can we reach in? 
- ;; (switch-to-buffer (get-buffer buffer-file-name)) - ;; (spacemacs/toggle-maximize-buffer) - ) - ) - ) - ) -;; Add Later -;; https://www.emacswiki.org/emacs/AutomaticFileHeaders #templates / updates etc -;; ^^ based on https://www.emacswiki.org/emacs/download/header2.el -;; ;; https://stackoverflow.com/questions/13228001/org-mode-nested-properties -;; https://www.reddit.com/r/emacs/comments/4154bu/how_to_get_orgmode_to_recognize_markdownstyle/ -;; ^^ https://www.reddit.com/r/emacs/comments/4154bu/how_to_get_orgmode_to_recognize_markdownstyle/cz0bb45/ -;;http://endlessparentheses.com/markdown-style-link-ids-in-org-mode.html +;; (yas/expand) +;; (make-variable-buffer-local 'yas/trigger-key) +;; (setq yas/trigger-key [tab]) +;; (add-to-list 'org-tab-first-hook 'yas/org-very-safe-expand) +;; (define-key yas/keymap [tab] 'yas/next-field) +;; ;; (gui-select-text (concat "rm -fi " socket "; ssh -tAX " ssh-user "@" ssh-host " -L " socket ":" socket " " start-tmate-over-ssh-command)) +;; ;; (edebug-trace "TRACING socket:%S" socket) +;; ;; (edebug-trace "TRACING org-babel-header-args:tmate %S" org-babel-header-args:emacs-lisp) +;; ;; we could try and create a buffer / clear it on the fly +;; ;; ssh later? +;; ;; (with-current-buffer (get-buffer-create "start-tmate-command") +;; ;; (insert-for-yank +;; ;; (concat "\nOpen another terminal on the same host and paste:\n\n" tmate-command) +;; ;; )) +;; ;; FIXME! How do we find out what our local filname is? +;; ;; This was designed for dir-locals... can we reach in? 
+;; ;; (switch-to-buffer (get-buffer buffer-file-name)) +;; ;; (spacemacs/toggle-maximize-buffer) +;; ) +;; ) +;; ) +;; ) +;; ;; Add Later +;; ;; https://www.emacswiki.org/emacs/AutomaticFileHeaders #templates / updates etc +;; ;; ^^ based on https://www.emacswiki.org/emacs/download/header2.el +;; ;; ;; https://stackoverflow.com/questions/13228001/org-mode-nested-properties +;; ;; https://www.reddit.com/r/emacs/comments/4154bu/how_to_get_orgmode_to_recognize_markdownstyle/ +;; ;; ^^ https://www.reddit.com/r/emacs/comments/4154bu/how_to_get_orgmode_to_recognize_markdownstyle/cz0bb45/ +;; ;;http://endlessparentheses.com/markdown-style-link-ids-in-org-mode.html diff --git a/learning/knative/README.org b/learning/knative/README.org new file mode 100644 index 0000000..964b605 --- /dev/null +++ b/learning/knative/README.org @@ -0,0 +1,245 @@ +#+TITLE: Knative + +* Start your Pair instances + +with this Env, you'll get Knative+Contour (Contour replacing nginx-ingress) +#+begin_src +SHARINGIO_PAIR_INIT_EXTRAS=knative +#+end_src + +* What is Knative? +- a serverless platform that runs a'top of Kubernetes +- event driven +- a way of abstracting and simplifying HTTP(1/2) apps on Kubernetes + +* What is Serverless? +- code / container focused +- per-request processing + +* Knative Services +Looks minimal eh? +#+begin_src yaml +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: nginx + namespace: bobymcbobs +spec: + template: + spec: + containers: + - image: nginx:stable + ports: + - containerPort: 80 +#+end_src + +** Behind the scenes +Service +-> Configuration + -> Revision + -> Deployment + -> ReplicaSet + -> Pod + +But you don't really need to know much about it! 
+ +* Features +- request driven autoscaling +- image tag resolving +- traffic splitting +- auto domain assigning +- auto-tls termination +- custom domain assigning + +* kn cli +** Create a service +#+begin_src tmate :window knative-playground +kn service create nginx --image=nginx:stable --port=80 +#+end_src + +** Delete a service +#+begin_src tmate :window knative-playground +kn service delete nginx +#+end_src + +* Accessing services +#+begin_src shell +kn service list -A +#+end_src + +#+RESULTS: +#+begin_example +NAMESPACE NAME URL LATEST AGE CONDITIONS READY REASON +test nginx https://nginx.test.bobymcbobs.pair.sharing.io nginx-00001 17m 3 OK / 3 True +#+end_example + +* What makes up Knative Serving? +** Components +#+begin_src shell +kubectl -n knative-serving get deployment +#+end_src + +#+RESULTS: +#+begin_example +NAME READY UP-TO-DATE AVAILABLE AGE +activator 1/1 1 1 48m +autoscaler 1/1 1 1 48m +autoscaler-hpa 1/1 1 1 48m +controller 1/1 1 1 48m +domain-mapping 1/1 1 1 48m +domainmapping-webhook 1/1 1 1 48m +net-certmanager-controller 1/1 1 1 48m +net-certmanager-webhook 1/1 1 1 48m +net-contour-controller 1/1 1 1 48m +webhook 1/1 1 1 48m +#+end_example + +#+begin_notes +Activator: listens for endpoints hit and scales up revision deployments +#+end_notes + +** Configuration +#+begin_src shell +kubectl -n knative-serving get cm +#+end_src + +#+RESULTS: +#+begin_example +NAME DATA AGE +config-autoscaler 1 59m +config-certmanager 2 59m +config-contour 1 59m +config-defaults 2 59m +config-deployment 2 59m +config-domain 2 59m +config-features 1 59m +config-gc 1 59m +config-leader-election 1 59m +config-logging 1 59m +config-network 5 59m +config-observability 1 59m +config-tracing 1 59m +kube-root-ca.crt 1 59m +#+end_example + +#+begin_notes +Since we're using Knative-Operator we don't modify the configuration this way, but checking them out is useful for observing the exact config +#+end_notes + +* Pair default Knative Serving configuration +#+begin_src shell 
:wrap "SRC yaml" +cat ~/.sharing.io/cluster-api/manifests/extras/knative/03-serving.yaml +#+end_src + +#+RESULTS: +#+begin_SRC yaml +apiVersion: v1 +kind: Namespace +metadata: + name: knative-serving +--- +apiVersion: operator.knative.dev/v1beta1 +kind: KnativeServing +metadata: + name: knative-serving + namespace: knative-serving +spec: + version: 1.3.0 + additionalManifests: + - URL: https://github.com/knative/net-certmanager/releases/download/knative-v1.3.0/release.yaml + ingress: + istio: + enabled: false + kourier: + enabled: false + contour: + enabled: true + config: + contour: + default-tls-secret: ${SHARINGIO_PAIR_INSTANCE_SETUP_USERLOWERCASE}/letsencrypt-prod-without-selector + network: + ingress-class: contour.ingress.networking.knative.dev + clusteringress.class: contour.ingress.networking.knative.dev + certificate-provider: cert-manager + autocreate-cluster-domain-claims: "true" + auto-tls: Enabled + default-external-scheme: HTTPS + http-protocol: Redirected + defaults: + container-name-template: "{{.Name}}" + domain: + ${SHARINGIO_PAIR_BASE_DNS_NAME}: "" + certmanager: + issuerRef: | + kind: ClusterIssuer + name: letsencrypt-prod-without-selector +#+end_SRC + + +* LOCAL VARS :noexport: + Local Variables: + eval: (setenv "MULTIPLEX_URL" (or (getenv "MULTIPLEX_URL") "https://reveal-multiplex.glitch.me")) + eval: (set-variable 'org-re-reveal-root (getenv "MULTIPLEX_URL")) + eval: (set-variable 'org-re-reveal-multiplex-url (concat (getenv "MULTIPLEX_URL") "/")) + eval: (set-variable 'org-re-reveal-multiplex-socketio-url (concat (getenv "MULTIPLEX_URL") "/socket.io/socket.io.js")) + End: +#+REVEAL_MULTIPLEX_SECRET: 16303595814587938032 +#+REVEAL_MULTIPLEX_ID: 1ea00b34ec29b2a6 +#+REVEAL_VERSION: 4 +#+NOREVEAL_DEFAULT_FRAG_STYLE: YY +#+REVEAL_EXTRA_CSS: ./ii-style.css +#+NOREVEAL_EXTRA_JS: YY +#+REVEAL_HLEVEL: 2 +#+REVEAL_MARGIN: 0 +#+REVEAL_WIDTH: 5000 +#+REVEAL_HEIGHT: 800 +#+REVEAL_MAX_SCALE: 0.9 +#+REVEAL_MIN_SCALE: 0.2 +#+REVEAL_PLUGINS: (markdown notes 
highlight multiplex) +#+REVEAL_SLIDE_NUMBER: "" +#+REVEAL_PROGRESS: f +#+REVEAL_SPEED: 1 +#+REVEAL_THEME: night +#+REVEAL_THEME_OPTIONS: beige|black|blood|league|moon|night|serif|simple|sky|solarized|white +#+REVEAL_TRANS: fade +#+REVEAL_TRANS_OPTIONS: none|cube|fade|concave|convex|page|slide|zoom +#+REVEAL_TITLE_SLIDE:

%t

+#+REVEAL_TITLE_SLIDE:

%s

+#+REVEAL_TITLE_SLIDE:

%a

+ +#+OPTIONS: num:nil +#+OPTIONS: toc:nil +#+OPTIONS: mathjax:Y +#+OPTIONS: reveal_single_file:nil +#+OPTIONS: reveal_control:t +#+OPTIONS: reveal_progress:nil +#+OPTIONS: reveal_history:nil +#+OPTIONS: reveal_center:t +#+OPTIONS: reveal_rolling_links:nil +#+OPTIONS: reveal_keyboard:t +#+OPTIONS: reveal_overview:t +#+OPTIONS: reveal_width:1200 +#+OPTIONS: reveal_height:800 +#+OPTIONS: reveal_fragmentinurl:t +#+OPTIONS: timestamp:nil +#+OPTIONS: reveal_title_slide:nil + +* Set up :noexport: +Link up this folder to the web +#+begin_src shell :results silent +rm ~/public_html +ln -s $PWD ~/public_html +#+end_src + +Generate a token +#+begin_src shell +curl -s ${MULTIPLEX_URL:-reveal-multiplex.glitch.me}/token | jq . +#+end_src + +#+RESULTS: +#+begin_example +{ + "secret": "16303595814587938032", + "socketId": "1ea00b34ec29b2a6" +} +#+end_example diff --git a/learning/working-with-orgmode-tables-basics.org b/learning/working-with-orgmode-tables-basics.org index 210a933..3b14d7c 100644 --- a/learning/working-with-orgmode-tables-basics.org +++ b/learning/working-with-orgmode-tables-basics.org @@ -3,6 +3,9 @@ #+EMAIL: stephen@ii.coop #+CREATOR: ii.coop #+DATE: 9th of March, 2019 +#+DATE_CREATED: 2019-03-09 +#+DATE_UPDATED: 2019-03-09 +#+FIRN_SUMMARY: An introduction to how to create/edit tables with Org-Mode. #+STARTUP: showeverything * Overview diff --git a/ncw/.dir-locals.el b/ncw/.dir-locals.el new file mode 100644 index 0000000..ded0d1b --- /dev/null +++ b/ncw/.dir-locals.el @@ -0,0 +1,66 @@ +;;; -*- lexical-binding: t; -*- +(( + org-mode . + ((eval . 
+ (progn + (require 'org-ql) + + (setq org-publish-project-alist + '(("ncw-html" + :base-directory "~/org/ncw/" + :base-extention "org" + :publishing-directory "~/org/ncw/public/" + :recursive t + :publishing-function org-html-publish-to-html + :html-container "section" + :html-head-extra "" + :html-divs ((preamble "div" "preamble") + (content "article" "content") + (postamble "div" "postamble")) + :html-doctype "html5" + :headline-levels 2 + :html-html5-fancy t))) + + (org-link-set-parameters "dfn" + ;; :follow #'org-dfn-follow + :export #'org-dfn-export) + + (defcustom org-man-command 'dfn + "Emacs link to pull from local glossary" + :group 'org-link) + + (defun org-dfn-pull-term (term) + "Pull definition of term, TERM, from local GLOSSARY. +GLOSSARY assumed to be org file where headings are TERMs and each +heading has a property drawer with a DEFINITION value" + (cdr (car(-filter (lambda (x) (string= (car x) "DEFINITION")) + (car (org-ql-query + :select '(org-entry-properties) + :from "~/org/ncw/glossary.org" + :where `(heading ,term))))))) + (defun gloss-snippet (description definition) + (let ((html " + %s + + %s + + ")) + (format html description definition))) + + (defun org-dfn-export (link description format _) + "Export a man page link from Org files." 
+ (let ((desc (or description link)) + (definition (org-dfn-pull-term link))) + (pcase format + (`html (gloss-snippet desc definition)) + ;; (format " + ;; + ;; %s + ;; %s + ;; " desc definition)) + (`latex (format "\\href{%s}{%s}" path desc)) + (`texinfo (format "@uref{%s,%s}" path desc)) + (`ascii (format "%s (%s)" desc path)) + (t path))))))))) diff --git a/ncw/glossary.org b/ncw/glossary.org new file mode 100644 index 0000000..8d86444 --- /dev/null +++ b/ncw/glossary.org @@ -0,0 +1,29 @@ +#+TITLE: Glossary +#+PURPOSE: each heading is a term, add the definition within the property drawer + +* Docker +:PROPERTIES: +:DEFINITION: Containerization software that lets you package programs, and the hardware required to run them, into self-contained virtual machines. This makes them easier to share, copy, and run on different computers. +:END: +* Git +:PROPERTIES: +:DEFINITION: Git is a version control system, that lets you track and move through historical versions of your code, along with collaborative tools for working on code together. +:END: + +* Go +:PROPERTIES: +:DEFINITION: A programming language. The majority of Kubernetes is written in Go. +:END: +* Kubernetes +:PROPERTIES: +:DEFINITION: A container orchestration platform intended to simplify the management of complex infrastructure. see its homepage +:END: + +* SSH +:PROPERTIES: +:DEFINITION: Short for secure shell. It is a way of using keys to be able to access remote machines from your local computer, and a highly secure method of confirming your identity with these remote machines. +:END: +* Terminal +:PROPERTIES: +:DEFINITION: Also known as the command line, a program on your computer for talking directly to the computer using written commands. 
Digital Ocean has a nice introduction +:END: diff --git a/ncw/public/aesthetic/main.css b/ncw/public/aesthetic/main.css new file mode 100644 index 0000000..7a22f86 --- /dev/null +++ b/ncw/public/aesthetic/main.css @@ -0,0 +1,109 @@ +:root { + --colourA: hsl(45, 60.0%, 96%); /*often used as bg*/ + --colourB: hsl(300,1.7%,,11.6%); /*often used as text body colour*/ + --font_size: 22px; + --font_family: 'IBM Plex Sans'; +} + +body, html { + margin: 0; + padding: 0; + padding-left: 5.5%; + font-weight: 400; +} + +* { + box-sizing: border-box;; +} + +body { + background: var(--colourA); + color: var(--colourB); + font-size: var(--font_size); + font-family: var(--font_family); +} + +article { + max-width: 45rem; +} + +.marginnote { + float: right; + clear: right; + width: 18rem; + margin: 0 -20.5rem 1rem 0; + font-size: 0.9rem; + line-height: 1.3; + vertical-align: baseline; + position: relative; + font-style: italic; +} + +h1.title { + font-weight: 200; + font-size: 3rem; + text-align: left; + margin-bottom: 3rem; +} + +h2 { + border-top: 1px solid red; + font-weight: 300; + margin-top: 3rem; + +} +span.glossary-term { + display: inline; + position: relative; + background: white; + opacity: 0.9; + padding-left: 0.25em; + padding-right: 0.25em; + transition-property: opacity; + transition-duration: 0.20s; + color: black; + width: 1rem; + border: solid 1px grey; + box-sizing: content-box; + border-radius: 0.2em; + cursor: pointer; +} + +span.glossary-term_inner { + visibility: hidden; + display: block; + position: absolute; + right: -1px; + bottom: 1rem; + width: 12rem; + background: #999; + color: white; + padding: 1em; + font-size: 85%; + border-radius: 0.5em 0.5em 0 0.5em; + transform-origin: right bottom; + transform: scale(0.4); + opacity: 0; + transition-property: all; + transition-duration: 0.2s; +} + +span.glossary-term.visible span.glossary-term_inner { + visibility: visible; + transform-origin: right bottom; + transform: scale(1); + opacity: 1; + cursor: 
pointer; +} + +span.glossary-term_inner p { + display: inline; +} + +span.glossary-term:hover, span.glossary-term.visible { + background: #999; + color: white; + opacity: 1; + transition-property: opacity; + transition-duration: 0.20s; +} diff --git a/ncw/public/glossary.html b/ncw/public/glossary.html new file mode 100644 index 0000000..bbca73e --- /dev/null +++ b/ncw/public/glossary.html @@ -0,0 +1,257 @@ + + + + + + +Glossary + + + + + + + + +
+

Author: ii friend

+

Created: 2021-02-25 Thu 11:14

+
+ + diff --git a/ncw/public/session-01.html b/ncw/public/session-01.html new file mode 100644 index 0000000..834c356 --- /dev/null +++ b/ncw/public/session-01.html @@ -0,0 +1,788 @@ + + + + + + +Session 01 + + + + + + + + + +
+
+

Session 01

+
+
+

Introduction

+
+

+Welcome to part one of our “New Contributor Summit” guide. In this series we will learn how to run, customize, test, and contribute to + Kubernetes + + A container orchestration platform intended to simplify the management of complex infrastructure. see its homepage + + . +

+ +

+This first part assumes you are getting started on this path, and have little to no knowledge of running Kubernetes. You will want some familiarity with the + terminal + + Also known as the command line, a program on your computer for talking directly to the computer using written commands. Digital Ocean has a nice introduction + + though. +

+
+
+ +
+

Agenda

+
+

+In this session, we tackle: +

+
    +
  • hardware and OS requirements to run Kubernetes
  • +
  • Setting up your environment
  • +
  • github and your git configuration
  • +
  • forking and cloning Kubernetes
  • +
  • the workflow for contributing changes back to Kubernetes.
  • +
+
+
+
+

Hardware and OS Requirements

+
+

+Kubernetes can run on linux, mac, windows, or within + Docker + + Containerization software that lets you package programs, and the hardware required to run them, into self-contained virtual machines. This makes them easier to share, copy, and run on different computers. + + . +

+ +

+It is a large project, and will require a lot of computing power. Whichever system you use, ensure that it has at least: +

+
    +
  • 8GB of RAM
  • +
  • 50gb or more of free disk space
  • +
  • multiple cores
  • +
+
+
+
+

Setting up your environment

+
+

+To get started, you will want to configure your computer to be able to build and run Kubernetes. The way you configure it will change depending on if you are using Linux, Mac, or Windows. +

+
+
+

On linux

+
+

+This one is the easiest: there’s no additional configuration needed! +

+
+
+
+

On Mac

+
+

+Kubernetes assumes you have a set of GNU command line tools installed, which don’t come natively with the Mac. So you will want to install a package manager, called Brew, then use this to install the GNU tools. +

+
+
    +
  • Install Brew
    +
    +

    +The easiest way to install is from the brew homepage +

    + +

    +After installed, you should be able to run this command in the terminal: +

    + +
    +
    brew -v
    +
    +
    + +

    +And see output similar to: +

    + +
    +Homebrew 2.7.5
    +Homebrew/homebrew-core (git revision 33c47; last commit 2021-01-17)
    +Homebrew/homebrew-cask (git revision 4dc8d; last commit 2021-01-17)
    +
    +
    +
  • +
+
+ +
+

Install GNU tools

+
+

+In your terminal, enter the following command +

+
+
brew install coreutils ed findutils gawk gnu-sed gnu-tar grep make
+
+
+
    +
  • Setup your shell init script
  • +
+ +

+Lastly, you want your computer to know how to find these tools, which we can do by adding a code snippet +to the end of your .bashrc. +Add this snippet to the bottom of your .bashrc +

+
+GNUBINS="$(find /usr/local/opt -type d -follow -name gnubin -print)"
+
+for bindir in ${GNUBINS[@]}
+do
+  export PATH=$bindir:$PATH
+done
+
+export PATH
+
+
+ +
+
+ +
+

Installing prerequisites

+
+

+What you need to install, and how to install it, will change depending on the type of system you are using. +

+
+
+

If you are running Linux

+
+

+No additional considerations are needed! +

+
+
+
+

Mac

+
+

+With macs, you will want to have a mac package manager and then use that to install some necessary prerequisites. +

+
+
    +
  • Install Brew
    +
    +

    +This is a package manager for mac that you run in the command line. You can +download and install it from the brew homepage +

    +
    +
  • +
  • Install Dependencies
    +
    +

    +We’ll use brew to install these. +

    +
    +
    brew install coreutils ed findutils gawk gnu-sed gnu-tar grep make
    +
    +
    +
    +
  • +
  • Setup your .bashrc
    +
    +

    +Add this snippet to the bottom of your .bashrc +

    +
    +GNUBINS="$(find /usr/local/opt -type d -follow -name gnubin -print)"
    +
    +for bindir in ${GNUBINS[@]}
    +do
    +  export PATH=$bindir:$PATH
    +done
    +
    +export PATH
    +
    +
    +
  • +
  • Check out official docs
    +
    +

    +You can find them here: k8s/community/development.md +

    +
    +
  • +
+
+
+

Windows

+
+
+ +
+
+
+

Software Prerequisites

+
+
+
+

Docker

+
+
+
    +
  • What is docker
    +
    +
      +
    • a container engine
    • +
    • allows you to bundle and package
    • +
    +

    +Docker is a set of platform as a service (PaaS) products that use OS-level virtualization to deliver software in packages called containers. +Containers are isolated from one another and bundle their own software, libraries and configuration files; they can communicate with each other through well-defined channels. +All containers are run by a single operating system kernel and therefore use fewer resources than virtual machines. +

    +
    +
  • +
  • Check if you have docker installed
    +
    +

    +The operating-system independent way to check whether Docker is running is to ask Docker, using the docker info command. +You can also use operating system utilities, such as +

    +
    +sudo systemctl is-active docker
    +
    +

    +or +

    +
    +sudo status docker
    +
    +

    +or +

    +
    +sudo service docker status
    +
    +

    +or checking the service status using Windows utilities. +Finally, you can check in the process list for the `dockerd` process, using commands like +

    +
    + ps
    +
    +

    +or +

    +
    + top
    +
    +
    +
  • + +
  • Installing docker
    +
    +

    +Docker Engine is available on a variety of Linux platforms, macOS and Windows 10 through Docker Desktop, and as a static binary installation. +Find your preferred operating system below. 

    +
    + +
      +
    • MacOS
      +
      +

      +Instruction for MacOS install +

      +
      +
    • + +
    • Linux
      +
      +

      +Instructions for +Debian install +Fedora install +Ubuntu install 

      +
      +
    • + +
    • Windows
      +
      +

      +Docker Desktop for Windows is the Community version of Docker for Microsoft Windows. +You can download Docker Desktop for Windows from Docker Hub to install +

      +
      +
    • +
    +
  • +
+
+ + +
+

Git

+
+
+
    +
  • What is git
    +
    +

    +GitHub provides hosting for software development and version control using Git. +It offers the distributed version control and source code management (SCM) functionality of Git, plus its own features. +It provides access control and several collaboration features such as bug tracking, feature requests, task management and continuous integration. +

    +
    +
  • + +
  • Check if you have git installed
  • +
  • Installing git
    +
    +

    +In a terminal window run +`git –version` +If it is installed you will get a message like `git version 2.25.1` +

    +
    + + +
  • + +
  • Configure git
    +
    +

    +To use git you need a Github account. +If you do not have an account yet go to the Github website to sign up. +You’ll need: 

    +
      +
    • name
    • +
    • email
    • +
    • password
    • +
    + +

    +preparing for working with the k8s repo. +

    +
    +
  • +
+
+
+

Go

+
+
+
    +
  • What is go
    +
    +

    +Go or Golang as it is also known is an open source programming language that makes it easy to build simple, reliable, and efficient software. +

    +
    +
  • +
  • Installing go
    +
    +

    +We want to check if Go is installed and what version. +Open Command Prompt / CMD or Terminal window, execute the command to check the Go version. Make sure you have the latest version of Go. +$ go version 

    + +

    +If you need to install Go the official installation page has instructions for Linux, Mac and Windows 

    +
    +
  • + +
  • Adding go to your path
    +
    +

    +and knowing how to find your $GOPATH – We can look here: https://golang.org/doc/gopath_code.html +

    +
    +
  • +
+
+
+

SSH Keys

+
+
+
    +
  • what is ssh
    +
    +

    +SSH is a secure protocol used as the primary means of connecting to Linux servers remotely. +It provides a text-based interface by spawning a remote shell. +After connecting, all commands you type in your local terminal are sent to the remote server and executed there. +SSH keys are a matching set of cryptographic keys which can be used for authentication. Each set contains a public and a private key. +The public key can be shared freely without concern, while the private key must be vigilantly guarded and never exposed to anyone. +

    +
    +
  • +
  • creating a new ssh key
    +
    +

    +To generate an RSA key pair on your local computer, type: +

    +
      +
    • ssh-keygen
    • +
    +

    +This will create two files in the .ssh directory: your private key id_rsa and public key id_rsa.pub 

    +
    +
  • +
+
+
+ +
+

Github configuration

+
+
+
+

Signing up for github account

+
+
+

Uploading your SSH Key

+
+
+

Signing the CNCF CLA

+
+
+
+

Forking and Cloning K8s

+
+
+
+

brief tour of k8s repo

+
+
+

forking to your own repo

+
+
+

cloning k8s down to your own computer

+
+
+
+

The Kubernetes git workflow

+
+
+
+

k8s/k8s is ’upstream’

+
+
+

you create a branch on your fork, and push and make changes.

+
+
+

then open a pr in upstream, comparing across forks.

+
+
+
+

Getting Additional Help

+
+

+We won’t be doing this live, but are there other resources we can offer for help? perhaps a slack channel that we’d be moderating during NCW times? A repo in which they can open issues for their questions? +

+
+
+
+

What’s Next?

+
+

+Outline of session 2. You have all the requirements, now we will build and hack on kubernetes! +

+
+
+
+
+

Author: ii friend

+

Created: 2021-02-25 Thu 11:14

+
+ + diff --git a/ncw/public/session-02.html b/ncw/public/session-02.html new file mode 100644 index 0000000..7ec6e6d --- /dev/null +++ b/ncw/public/session-02.html @@ -0,0 +1,406 @@ + + + + + + +Session 02 + + + + + + + +
+
+

Session 02

+
+ +
+

1 Introduction

+
+
    +
  • In this Session we will introduce you to the make command and kubernetes cmd folder.
  • +
  • You’ll also learn about KinD (kubernetes in docker)
  • +
  • +We’ll learn more about how the k8s build process works 

    + +

    +By the end, you will edit and build a kubernetes command that you can run on your own kind cluster! +This session continues on Session 1. If you haven’t done that one yet, do it first! +

  • +
+
+
+
+

2 Agenda

+
+
    +
  • Intro to make
  • +
  • Intro to CMD
  • +
  • The Build Process
  • +
  • Intro to KinD
  • +
  • Editing and Building
  • +
  • Running our command on KinD
  • +
+
+
+
+

3 Make

+
+
+
+

3.1 What it is

+
+
+

3.2 Ensuring you have it on your computer

+
+
+

3.3 How we use it

+
+
+
+

4 The CMD Folder

+
+
+
+

4.1 Where to find it

+
+
+

4.2 What it is

+
+
+
+

5 Making in Parts

+
+

+Why do we not make all of kubernetes (no don’t run make release) +What do we make in isolation? +

+
+
+
+

6 Verify Dev environment ready to go

+
+

+if needed, include instructions for each type of OS +

+
+
+

6.1 Have Docker

+
+
+

6.2 Have git

+
+
+

6.3 Have Go

+
+
+
    +
  1. GOPATH set
  2. +
+
+
+

6.4 Fork of k8s cloned to dev environment

+
+
+
+

7 Run a make command

+
+
+
+

7.1 cd into k8s from yr terminal

+
+
+

7.2 make WHAT=cmd/kubectl

+
+

+maybe edit the print output for fun, and see it change +

+
+
+
+
+

8 Make a KinD Cluster

+
+
+
+

8.1 What is kind?

+
+
+

8.2 Install Kind

+
+
+

8.3 kind create cluster

+
+
+
+

9 Use newly-built kubectl binary in the KinD cluster

+
+
+

10 Additional Help

+
+
+

11 What’s Next?

+
+
+
+

Author: ii friend

+

Created: 2021-02-24 Wed 13:47

+
+ + diff --git a/ncw/public/session-03.html b/ncw/public/session-03.html new file mode 100644 index 0000000..c5ec539 --- /dev/null +++ b/ncw/public/session-03.html @@ -0,0 +1,410 @@ + + + + + + +Session 03 + + + + + + + +
+
+

Session 03

+
+ +
+

1 Introduction

+
+

+In this, we’ll ramp up our abilities by adding testing into the mix! +Like session 2, we will edit, make, and run commands like kubectl, but now checking our builds with unit testing, using go test. +With these tests, we’ll have increased confidence in contributing our work back upstream and so we’ll also talk about pull requests, and the PR pre-submission practices. +

+
+
+
+

2 Agenda

+
+
    +
  • Setup our Dev environments
  • +
  • introduce unit tests
  • +
  • testing with go test and make
  • +
  • PR’s
  • +
  • An intro to prow and test grid
  • +
+
+
+
+

3 Setup

+
+
+
+

3.1 Kind

+
+
+

3.2 a working kubectl binary of some sort

+
+
+

3.3 go

+
+
+

3.4 make

+
+
+
+

4 Edit our kubectl binary

+
+

+Adjust its message again, or have it do something in addition +don’t build just yet +

+
+
+
+

5 Unit Tests

+
+
+
+

5.1 what are they?

+
+
+

5.2 why they important?

+
+
+

5.3 how k8s uses them

+
+
+
+

6 Write a unit test for our kubectl binary

+
+
+

7 check our test with go test

+
+
+

8 check our test with make

+
+
+

9 Test scope

+
+
+
+

9.1 only run some tests

+
+
+

9.2 run all tests

+
+
+

9.3 time to run all tests

+
+
+
+

10 PR’s

+
+
    +
  • review the pr flow again
  • +
  • review the PR pre-submission guidelines
  • +
  • review the style guidelines
  • +
  • show some of the checks done on an existing pr and the checks for the pre-submission and style
  • +
  • what is doing these checks?
  • +
+
+
+
+

11 Prow

+
+
+
+

11.1 k8s git ops

+
+
+

11.2 helps manage these steps of the pr

+
+
+

11.3 ensures yr pr follows the guidelines and passes all existing tests.

+
+
+
+

12 Testgrid

+
+
+
+

12.1 show all the tests being run and their success

+
+
+

12.2 this can be optional, and so

+
+
+
+

13 Additional Help

+
+
+

14 What’s Next?

+
+
+
+

Author: ii friend

+

Created: 2021-02-24 Wed 13:47

+
+ + diff --git a/ncw/session-01.org b/ncw/session-01.org new file mode 100644 index 0000000..cb4b6d0 --- /dev/null +++ b/ncw/session-01.org @@ -0,0 +1,199 @@ +#+TITLE: Session 01 +#+HTML_HEAD_EXTRA: +#+HTML_HEAD_EXTRA: +#+HTML_HEAD_EXTRA: + +* Introduction +Welcome to part one of our "New Contributor Summit" guide. In this series we will learn how to run, customize, test, and contribute to [[dfn:Kubernetes][Kubernetes]]. + +This first part assumes you are getting started on this path, and have little to no knowledge of running Kubernetes. You will want some familiarity with the [[dfn:Terminal][terminal]] though. + +* Agenda +In this session, we tackle: +- hardware and OS requirements to run Kubernetes +- Setting up your environment +- github and your git configuration +- forking and cloning Kubernetes +- the workflow for contributing changes back to Kubernetes. +* Hardware and OS Requirements +Kubernetes can run on linux, mac, windows, or within [[dfn:Docker][Docker]]. +# [[mn:1][If using Docker for Mac (or Windows), dedicate the Docker system multiple CPU cores and 6GB RAM]] +It is a large project, and will require a lot of computing power. Whichever system you use, ensure that it has at least: +- 8GB of RAM +- 50gb or more of free disk space +- multiple cores +* Setting up your environment +To get started, you will want to configure your computer to be able to build and run Kubernetes. The way you configure it will change depending on if you are using Linux, Mac, or Windows. +** On linux +This one is easy, there's no additional configuration needed! +** On Mac +Kubernetes assumes you have a set of GNU command line tools installed, which don't come natively with the Mac. So you will want to install a package manager, called Brew, then use this to install the GNU tools. 
+*** Install Brew +The easiest way to install is from the [[https://brew.sh][brew homepage]] + +After installed, you should be able to run this command in the terminal: + +#+BEGIN_SRC shell +brew -v +#+END_SRC + +And see output similar to: + +#+RESULTS: +#+begin_example +Homebrew 2.7.5 +Homebrew/homebrew-core (git revision 33c47; last commit 2021-01-17) +Homebrew/homebrew-cask (git revision 4dc8d; last commit 2021-01-17) +#+end_example + +*** Install GNU tools +Kubernetes expects to find a set of GNU command line tools. + +Install them now by entering this command in the terminal: + +#+begin_src shell +brew install coreutils ed findutils gawk gnu-sed gnu-tar grep make +#+end_src +*** Check which shell you're using +We want to add a code snippet to your shell's initialization script, the name of which changes depending on your shell. + +type: +#+BEGIN_SRC shell +echo $SHELL +#+END_SRC + +You will likely see either + +#+RESULTS: +#+begin_example +/bin/bash +#+end_example +or +#+RESULTS: +#+begin_example +/bin/zsh +#+end_example +*** Add code snippet to init script +Add the below script to the bottom of your ~\~/.bashrc~ or ~\~/.zshrc~ file, depending on if you're using ~bash~ or ~zsh~ respectively. + +#+begin_src +GNUBINS="$(find /usr/local/opt -type d -follow -name gnubin -print)" + +for bindir in ${GNUBINS[@]} +do + export PATH=$bindir:$PATH +done + +export PATH +#+end_src + +This scripts ensure your computer can find the tools we just installed. + +With all this set, you're reading to move forward! +*** Check out official docs +You can find them here: [[https://github.com/kubernetes/community/blob/master/contributors/devel/development.md#setting-up-macos][k8s/community/development.md]] + +** Windows +For windows, you basically want to run Linux from within windows, which is simple to do on Windows 10 onward + +If you're on Windows 10, you want ot set up the linux subsystem. 
This has more steps than can fit in this guide, but +there are good installation instructions [[https://docs.microsoft.com/en-us/windows/wsl/install-win10][read the installaction instructions]] + +If youa re not on windows 10, then you'll want to run this all on a virtual machine. Vagrant is a good option for this, and the instructions for installing it can be read here: [[https://www.vagrantup.com/docs/installation][install vagrant]] +* Software Prerequisites +With your environment setup, you can now install the software we'll use for working with kubernetes: Docker, Git, SSH, and Go. +** Docker +First, check if you have Docker already installed, by running the command: +#+BEGIN_SRC shell +docker info +#+END_SRC + +If it says ~docker command not found~, you will want to install docker desktop from their[[https://www.docker.com/products/docker-desktop][ homepage]]. +** Git +In a terminal window run +`git --version` +If it is installed you will get a message like `git version 2.25.1` +Otherwise, you will want to install it!: +- [[https://github.com/git-guides/install-git#install-git-on-mac][Installing on macOS]] +- [[https://github.com/git-guides/install-git#install-git-on-linux][Installing on Linux]] +- [[https://github.com/git-guides/install-git#install-git-on-windows][Installing on Windows]] +** Go +*** Installing go +We want to make check is Go is installed and what version. +Open Command Prompt / CMD ot Terminal window, execute the command to check the Go version. Make sure you have the latest version of Go. +$ go version + +If you need to install Go the [[https://golang.org/doc/install][official installation page]] have struction for Linux, Mac and Windows + +*** Adding go to your path +and knowing how to find your $GOPATH -- We can look here: https://golang.org/doc/gopath_code.html +** SSH Keys +*** what is ssh +SSH is a secure protocol used as the primary means of connecting to Linux servers remotely. 
+It provides a text-based interface by spawning a remote shell. +After connecting, all commands you type in your local terminal are sent to the remote server and executed there. +SSH keys are a matching set of cryptographic keys which can be used for authentication. Each set contains a public and a private key. +The public key can be shared freely without concern, while the private key must be vigilantly guarded and never exposed to anyone. +*** creating a new ssh key +To generate an RSA key pair on your local computer, type: +- ssh-keygen +This will create to files in the .ssh directory. Your private key id_rsa. and public key id_rsa.pub + +* Github configuration +** Signing up for github account +** Configure git +To use get you need a Github account. +If you do not have an account yet go to the [[https://github.com/][Github]] website to sign up. +You'll need: +- name +- email +- password + +preparing for working with the k8s repo. +** Uploading your SSH Key +** Signing the CNCF CLA +* Forking and Cloning K8s +** brief tour of k8s repo +** forking to your own repo +** cloning k8s down to your own computer +* The Kubernetes git workflow +** k8s/k8s is 'upstream' +** you create a branch on your fork, and push and make changes. +** then open a pr in upstream, comparing across forks. +* Getting Additional Help +We won't be doing this live, but are there other resources we can offer for help? perhaps a slack channel that we'd be moderating during NCW times? A repo in which they can open issues for their questions? +* What's Next? +Outline of session 2. You have all the requirements, now we will build and hack on kubernetes! 
+ +* Footnotes + +#+REVEAL_ROOT: https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.9.2 +# #+REVEAL_TITLE_SLIDE: +#+NOREVEAL_DEFAULT_FRAG_STYLE: YY +#+NOREVEAL_EXTRA_CSS: YY +#+NOREVEAL_EXTRA_JS: YY +#+REVEAL_HLEVEL: 2 +#+REVEAL_MARGIN: 0.1 +#+REVEAL_WIDTH: 1000 +#+REVEAL_HEIGHT: 600 +#+REVEAL_MAX_SCALE: 5.5 +#+REVEAL_MIN_SCALE: 0.2 +#+REVEAL_PLUGINS: (markdown notes highlight multiplex) +#+REVEAL_SLIDE_NUMBER: "" +#+REVEAL_SPEED: 1 +#+REVEAL_THEME: blood +#+REVEAL_THEME_OPTIONS: beige|black|blood|league|moon|night|serif|simple|sky|solarized|white +#+REVEAL_TRANS: cube +#+REVEAL_TRANS_OPTIONS: none|cube|fade|concave|convex|page|slide|zoom + +#+OPTIONS: num:nil +#+OPTIONS: toc:nil +#+OPTIONS: mathjax:Y +#+OPTIONS: reveal_single_file:nil +#+OPTIONS: reveal_control:t +#+OPTIONS: reveal-progress:t +#+OPTIONS: reveal_history:nil +#+OPTIONS: reveal_center:t +#+OPTIONS: reveal_rolling_links:nil +#+OPTIONS: reveal_keyboard:t +#+OPTIONS: reveal_overview:t diff --git a/ncw/session-02.org b/ncw/session-02.org new file mode 100644 index 0000000..647948e --- /dev/null +++ b/ncw/session-02.org @@ -0,0 +1,44 @@ +#+TITLE: Session 02 + +* Introduction +- In this Session we will introduce you to the make command and kubernetes cmd folder. +- You'll also learn about KinD (kubernetes in docker) +- We'll learn more about how k8s buid process works + + By the end, you will edit and build a kubernetes command that you can run on your own kind cluster! + **This session continues on Session 1. If you haven't done that one yet, do it first!** +* Agenda +- Intro to make +- Intro to CMD +- The Build Process +- Intro to KinD +- Editing and Building +- Running our command on KinD +* Make +** What it is +** Ensuring you have it on your computer +** How we use it +* The CMD Folder +** Where to find it +** What it is +* Making in Parts +Why do we not make all of kubernetes (no don't run make release) +What do we make in isolation? 
+* Verify Dev envrionment ready to go +/if needed, include instructions for each type of OS/ +** Have Docker +** Have git +** Have Go +*** GOPATH set +** Fork of k8s cloned to dev environment +* Run a make command +** cd into k8s from yr terminal +** make WHAT=cmd/kubectl +maybe edit the print output for fun, and see it change +* Make a KinD Cluster +** What is kind? +** Install Kind +** kind create cluster +* Use newly-built kubectl binary in the KinD cluster +* Additional Help +* What's Next? diff --git a/ncw/session-03.org b/ncw/session-03.org new file mode 100644 index 0000000..50ddc86 --- /dev/null +++ b/ncw/session-03.org @@ -0,0 +1,46 @@ +#+TITLE: Session 03 + +* Introduction +In this, we'll ramp up our abilities by adding testing into the mix! +Like session 2, we will edit, make, and run commands like kubectl, but now checking our builds with unit testing, using go test. +With these tests, we'll have increased confidence in contributing our work back upstream and so we'll also talk about pull requests, and the PR pre-submission practices. +* Agenda +- Setup our Dev environments +- introduce unit tests +- testing with go test and make +- PR's +- An intro to prow and test grid +* Setup +** Kind +** a working kubectl binary of some sort +** go +** make +* Edit our kubectl binary +Adjust its message again, or have it do something in addition +don't build just yet +* Unit Tests +** what are they? +** why they important? +** how k8s uses them +* Write a unit test for our kubectl binary +* check our test with go test +* check our test with make +* Test scope +** only run some tests +** run all tests +** time to run all tests +* PR's +- review the pr flow again +- review the PR pre-submission guidelines +- review the style guidelines +- show some of the checks done on an existing pr and the checks for the pre-submission and style +- what is doing these checks? 
+* Prow +** k8s git ops +** helps manage these steps of the pr +** ensures yr pr follows the guidelines and passes all existing tests. +* Testgrid +** show all the tests being run and their success +** this can be optional, and so +* Additional Help +* What's Next? diff --git a/pairing-template.org b/pairing-template.org index d5a7f73..8908b85 100644 --- a/pairing-template.org +++ b/pairing-template.org @@ -64,6 +64,5 @@ echo hello from nested # eval: (xclip-mode 0) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/people/berno/bq_ip_asn_start.org b/people/berno/bq_ip_asn_start.org new file mode 100644 index 0000000..5d0c3d2 --- /dev/null +++ b/people/berno/bq_ip_asn_start.org @@ -0,0 +1,157 @@ +#+TITLE: Bq Ip Asn Start +* Adding support for sql-bq +#+begin_src emacs-lisp :results silent +(defun ii-sql-comint-bq (product options &optional buf-name) + "Create a bq shell in a comint buffer." + ;; We may have 'options' like database later + ;; but for the most part, ensure bq command works externally first + (sql-comint product options buf-name) + ) +(defun ii-sql-bq (&optional buffer) + "Run bq by Google as an inferior process." 
+ (interactive "P") + (sql-product-interactive 'bq buffer) + ) +(sql-add-product 'bq "Google Big Query" + :free-software nil + ;; :font-lock 'bqm-font-lock-keywords + ;; :syntax-alist 'bqm-mode-syntax-table ; invalid + :prompt-regexp "^[[:alnum:]-]+> " + ;; I don't think we have a continuation prompt + ;; but org-babel-execute:sql-mode requires it + ;; otherwise re-search-forward errors on nil + ;; when it requires a string + :prompt-cont-regexp "3a83b8c2z93c89889a4c98r2z34" + ;; :prompt-length 9 ; can't precalculate this + :sqli-program "bq" + :sqli-login nil ; probably just need to preauth + :sqli-options '("shell" "--quiet" "--format" "pretty") + :sqli-comint-func 'ii-sql-comint-bq + ) +#+end_src + +#+begin_src emacs-lisp :results silent +(sql-del-product 'bq) +#+end_src + +#+begin_src sql-mode :product bq +select 1; +#+end_src + +#+RESULTS: +#+begin_SRC example ++-----+ +| f0_ | ++-----+ +| 1 | ++-----+ +#+end_SRC + +#+begin_src emacs-lisp +(sql-product-interactive 'bq) +#+end_src + +#+RESULTS: +#+begin_src emacs-lisp +# +#+end_src + +* Use headings +- and subthing +Interesting right? 
+Aite mostly using this as scratch pad + +<#+begin_example +Building the list of distinct ipv4 with int and ip_strings +##### +SELECT + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS ip, + c_ip AS c_ip +FROM + k8s_artifacts_gcslogs_appspot.distinct_appspot_ip +WHERE + REGEXP_CONTAINS(c_ip, r"\.") +ORDER BY + ip +LIMIT + 1000000; +#### + +Going over the output from the above and pushing that to a file +#### +select ip, +c_ip, +int64_field_2 as asn, +string_field_4 as asn_name +from `k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.distinct_appspot_ip_int`, +`k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.ip2asn_int` +where +ip >= f0_ and ip <= f1_; +##### +I would like to take chunks of 10k and do the comparison streaming results to a table + + +This is the while loop structure I came up with: +#### +X = 1000 +Y = select count(*) from + + Select folds from tab1, tab2 +WHERE ROWNUMBER >= x - 10000 and ROWNUMBER <= x ... +x = x + 10000; + +##### + +Lets try and use that structure to do a simple select on one of the tables +####### +X = 10000 +Y = (select count(*) from distinct_appspot_ip_int) + +DO while X < y LOOP +select ip, +c_ip, +int64_field_2 as asn, +string_field_4 as asn_name +from `k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.distinct_appspot_ip_int`, +`k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.ip2asn_int` +where +ip >= f0_ and ip <= f1_; +WHERE ROWNUMBER >= x - 10000 and ROWNUMBER <= x + 10000 +x = x + 1; + +END WHILE; +#### + +This is the version I got to run +###### +DECLARE X INT64; +DECLARE Y INT64; + +CREATE TEMP TABLE tmp_ip_int AS +SELECT ip, + c_ip, + RANK() OVER(ORDER BY ip) row_number + FROM k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.distinct_appspot_ip_int; + +SET X = 0; +SET Y = (select count(*) from k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.distinct_appspot_ip_int); + +WHILE X < y DO +SELECT ip, +c_ip, +int64_field_2 as asn, +string_field_4 as asn_name +FROM `tmp_ip_int`, 
+`k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.ip2asn_int` +WHERE +ip >= f0_ and ip <= f1_ +AND row_number = x; +SET x = x + 1; + +END WHILE; +####### +Problem, I still need to output that to a file... + + + +#+end_example diff --git a/people/berno/bq_scratch.org b/people/berno/bq_scratch.org new file mode 100644 index 0000000..52aff7f --- /dev/null +++ b/people/berno/bq_scratch.org @@ -0,0 +1,33 @@ +#+TITLE: Bq_scratch +Berno's BQ scratch bucket +* Old UI explorations +--This was used when we were exploring ip2asn matching.-- +Tab1, +#+begin_src bq +select NET.IPV4_TO_INT64(ipv4_c_ip) as ip, +NET.IP_TO_STRING (ipv4_c_ip), +int64_field_2 as asn, +string_field_4 as asn_name +from `k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.distinct_appspot_ip_netip_1`, +`k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.ip2asn` +where +RANGE_BUCKET( +NET.IPV4_TO_INT64(ipv4_c_ip), +[ +NET.IPV4_TO_INT64(NET.IP_FROM_STRING(string_field_0)), +NET.IPV4_TO_INT64(NET.IP_FROM_STRING(string_field_1)) +] +)=1; +#+end_src + +Convert IPv4 strings to int on peeringdb_expanded data +#+begin_src bq +SELECT + cidr_ip, + start_ip, + end_ip, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(start_ip)) AS start_ip_int, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(end_ip)) AS end_ip_int +FROM + k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.peeringdb_expanded_ipv4_20210524; +#+end_src diff --git a/people/berno/gcp_gcs_access.org b/people/berno/gcp_gcs_access.org new file mode 100644 index 0000000..1fadc50 --- /dev/null +++ b/people/berno/gcp_gcs_access.org @@ -0,0 +1,235 @@ +#+TITLE: Gcp Gcs Access +* check gsutil auth +#+BEGIN_SRC shell +gcloud auth list 2>&1 +: +#+END_SRC + +#+RESULTS: +#+begin_example + Credentialed Accounts +ACTIVE ACCOUNT +,* bb@ii.coop + +To set the active account, run: + $ gcloud config set account `ACCOUNT` + +#+end_example +* set default project +#+BEGIN_SRC shell +gcloud config set project apisnoop +#+END_SRC + +#+RESULTS: +#+begin_example +#+end_example + +* create bucket 
for logs +#+BEGIN_SRC shell +gsutil mb gs://bb-apisnoop-logs 2>&1 +: +#+END_SRC + +#+RESULTS: +#+begin_example +Creating gs://bb-apisnoop-logs/... +#+end_example + +* set pormissions to allow gcs permissions to the bucket +https://download.huihoo.com/google/gdgdevkit/DVD1/developers.google.com/storage/docs/accesslogs.html + +#+BEGIN_SRC shell +gsutil acl ch -g cloud-storage-analytics@google.com:W gs://bb-apisnoop-logs/ 2>&1 +: +#+END_SRC + +#+RESULTS: Initial sans OWNER-role +#+begin_example +CommandException: Failed to set acl for gs://apisnoop-logs/. Please ensure you have OWNER-role access to this resource. +#+end_example + +** get logging +#+BEGIN_SRC shell +gsutil logging get gs://apisnoop +#+END_SRC + +#+RESULTS: +#+begin_example +{"logBucket": "bb-apisnoop-logs", "logObjectPrefix": "accessLog"} +#+end_example + +#+RESULTS: Initial +#+begin_example +gs://apisnoop/ has no logging configuration. +#+end_example +* Enableg logging on the bucket to the bucket +This was the first pass +#+BEGIN_SRC tmate +gsutil logging set on -b gs://bb-apisnoop-logs gs://apisnoop +#+END_SRC + +#+RESULTS: +#+begin_example +#+end_example + +Pointing it to artifact.apisnoop.appspot.com +#+BEGIN_SRC tmate +gsutil logging set on -b gs://bb-apisnoop-logs -o accessLog gs://artifacts.apisnoop.appspot.com +#+END_SRC + +#+RESULTS: +#+begin_example +#+end_example +* Do a docker pull +#+BEGIN_SRC tmate +docker pull gcr.io/apisnoop/iimacs:0.9.15 +#+END_SRC +* Look for logs +#+BEGIN_SRC tmate +gsutil ls -la gs://bb-apisnoop-logs/ +#+END_SRC + +#+BEGIN_SRC tmate +gsutil cp -r gs://bb-apisnoop-logs/ . 
+#+END_SRC +Manually load one of the logs so we can look at it in bigQuery +#+BEGIN_SRC tmate +bq load --skip_leading_rows=1 storageanalysis.usage gs://bb-apisnoop-logs/* +#+END_SRC + +#+BEGIN_SRC tmate +bq shell +#+END_SRC + +#+BEGIN_SRC tmate +SELECT cs_user_agent, count(*) as count FROM [storageanalysis.usage] GROUP BY cs_user_agent +#+END_SRC + +#+BEGIN_EXAMPLE +apisnoop> SELECT cs_user_agent, count(*) as count FROM [storageanalysis.usage] GROUP BY cs_user_agent +Waiting on bqjob_r56b6fb5ec7f3e7bf_00000178ce711e86_1 ... (0s) Current status: DONE ++----------------------------------------------------------------------------------------------------------------------------------+-------+ +| cs_user_agent | count | ++----------------------------------------------------------------------------------------------------------------------------------+-------+ +| Google-API-Java-Client Google-HTTP-Java-Client/1.26.0-SNAPSHOT (gzip) | 3 | +| apitools gsutil/4.35 Python/2.7.13 (linux2) google-cloud-sdk/230.0.0 analytics/disabled,gzip(gfe) | 9 | +| Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36,gzip(gfe) | 6 | +| apitools Python/3.8.5 gsutil/4.61 (linux) analytics/disabled interactive/True command/logging google-cloud-sdk/336.0.0,gzip(gfe) | 2 | +| curl/7.52.1,gzip(gfe) | 4 | +| Helm/2.11.0,gzip(gfe) | 4280 | +| apitools gsutil/4.35 Python/2.7.13 (linux2) google-cloud-sdk/230.0.0 analytics/disabled | 2 | +| cloud_storage_cookieauth Google-API-Java-Client Google-HTTP-Java-Client/1.26.0-SNAPSHOT (gzip) | 2 | ++----------------------------------------------------------------------------------------------------------------------------------+-------+ +#+END_EXAMPLE + +#+BEGIN_SRC tmate +SELECT time_micros as timestamp, cs_method FROM [storageanalysis.usage] where cs_object="containers/images/sha256:f91914f4e2b0beff949c98a78c5103a496ae185cbc2996ad7e1f307f7d13e771" +#+END_SRC +#+BEGIN_SRC tmate +SELECT time_micros as 
timestamp, cs_method FROM [storageanalysis.usage] where cs_object like "containers/images/sha256" +#+END_SRC + +* Auth as service account +https://cloud.google.com/sdk/gcloud/reference/auth/activate-service-account +#+BEGIN_SRC tmate +gcloud auth activate-service-account reporting@apisnoop.iam.gserviceaccount.com --key-file=/home/ii/bb-reporting-key20210415.json --project=apisnoop +#+END_SRC + +* Now I test if I can get the logs +Lets try a quick copy with gsutil cp +#+BEGIN_SRC tmate +gsutil cp -r gs://bb-apisnoop-logs/ . +#+END_SRC +This gives me: +#+BEGIN_EXAMPLE +Satus': '403'}>, content &1 +: +#+END_SRC + +#+RESULTS: +#+begin_example + Credentialed Accounts +ACTIVE ACCOUNT +,* bb@ii.coop + reporting@apisnoop.iam.gserviceaccount.com + +To set the active account, run: + $ gcloud config set account `ACCOUNT` + +#+end_example +* Repo we are working with +git clone https://github.com/GoogleCloudPlatform/terraformer 2>&1 +* Install terrraformer +This did not did not end up working. I tried installing latest and 0.8.11 + +terraformer import google --resources=gcs --project=apisnoop +2021/04/29 16:15:38 unknown flag: --resources +#+BEGIN_SRC tmate +export PROVIDER={google,aws,kubernetes,all} +curl -LO https://github.com/GoogleCloudPlatform/terraformer/releases/download/$(curl -s https://api.github.com/repos/GoogleCloudPlatform/terraformer/releases/latest | grep tag_name | cut -d '"' -f 4)/terraformer-${PROVIDER}-linux-amd64 +chmod +x terraformer-${PROVIDER}-linux-amd64 +sudo cp terraformer-${PROVIDER}-linux-amd64 /usr/local/bin/terraformer +#+END_SRC +#+BEGIN_SRC tmate +export PROVIDER={all,google,aws,kubernetes} +curl -LO https://github.com/GoogleCloudPlatform/terraformer/releases/download/$(curl -s https://api.github.com/repos/GoogleCloudPlatform/terraformer/releases/tags/0.8.11 | grep tag_name | cut -d '"' -f 4)/terraformer-${PROVIDER}-linux-amd64 +chmod +x terraformer-${PROVIDER}-linux-amd64 +sudo cp terraformer-${PROVIDER}-linux-amd64 /usr/local/bin/terraformer 
+#+END_SRC +* Import GCS for apisnoop +#+BEGIN_SRC tmate +terraformer import google --resources=gcs --projects=apisnoop +#+END_SRC diff --git a/remote-template.org b/remote-template.org index 6f52a81..fecee75 100644 --- a/remote-template.org +++ b/remote-template.org @@ -247,6 +247,5 @@ tmate -S /tmp/kind.kind-ci-box.iisocket new-session -A -s kind -n main \ # eval: (xclip-mode 0) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/remote.org b/remote.org index e648d6f..c43404f 100644 --- a/remote.org +++ b/remote.org @@ -99,6 +99,5 @@ As soon as you load this file, create a new terminal and paste to create target # eval: (gui-select-text start-tmate-command) # org-babel-tmate-session-prefix: "" # org-babel-tmate-default-window-name: "main" -# org-confirm-babel-evaluate: nil # org-use-property-inheritance: t # End: diff --git a/research/apisnoop_release_process.org b/research/apisnoop_release_process.org new file mode 100644 index 0000000..00c18c3 --- /dev/null +++ b/research/apisnoop_release_process.org @@ -0,0 +1,64 @@ +#+TITLE: Apisnoop Release Process +** How ii makes APISnoop releases :kubernetes:apisnoop:cncf: +*** This is how ii makes apisnoop releases +In this post we will capture the details involved in building and promoting a image from the apisnoop repo +*** Repo and the tags we have: +https://github.com/cncf/apisnoop/tags +*** Overview of image publish jobs and how we can use them to promote our images +Documentation: [[https://github.com/kubernetes/test-infra/blob/master/config/jobs/image-pushing/README.md#image-pushing-jobs][test-infra-config/jobs/image-pushing/README]] +- We set up a [[https://github.com/kubernetes/k8s.io/blob/main/k8s.gcr.io/README.md#managing-kubernetes-container-registries][GCR]] to build our images +- We set up a image 
[[https://github.com/kubernetes/k8s.io/blob/main/k8s.gcr.io/README.md#image-promoter][promoter]] +*** Prow definition of image-build for apisnoop: +- Use this [[https://github.com/kubernetes/test-infra/blob/master/config/jobs/image-pushing/README.md][documentation]], the [[https://github.com/kubernetes/test-infra/blob/master/config/jobs/image-pushing/README.md#prow-config-template][prow-config-template]] is very strict. +- This the postsubmit job we defined in + [[https://github.com/kubernetes/test-infra/blob/master/config/jobs/image-pushing/k8s-staging-apisnoop.yaml][test-infra/config/jobs/image/pushing/k8s-staging-apisnoop.yaml]] +#+BEGIN_SRC yaml + decorate: true + branches: + - ^main$ +#+END_SRC +#+BEGIN_SRC yaml +containers: + - image: gcr.io/k8s-testimages/image-builder:v20210302-aa40187 + command: + - /run.sh + args: + # this is the project GCB will run in, which is the same as the GCR images are pushed to. + - --project=k8s-staging-apisnoop + - --scratch-bucket=gs://k8s-staging-apisnoop-gcb + - --env-passthrough=PULL_BASE_REF + - apps/snoopdb +#+END_SRC +*** The cloudbuild.yaml this job runs +[[https://github.com/cncf/apisnoop/blob/main/apps/snoopdb/cloudbuild.yaml][cncf/apisnoop/apps/snoopdb/cloudbuild.yaml]] +#+BEGIN_SRC yaml +steps: + - name: gcr.io/cloud-builders/docker + args: ['build', '-t', 'gcr.io/$PROJECT_ID/snoopdb:$_GIT_TAG', + '--build-arg', 'IMAGE_ARG=gcr.io/$PROJECT_ID/snoopdb:$_GIT_TAG', + './postgres'] +images: + - 'gcr.io/$PROJECT_ID/snoopdb:$_GIT_TAG' +substitutions: + _GIT_TAG: '12345' +options: + substitution_option: 'ALLOW_LOOSE' +#+END_SRC +*** Prow jobs to build/push snoopdb: +Where you can find the above job in prow: [[https://prow.k8s.io/?job=apisnoop-push-snoopdb-images][prow-apisnoop-push-snoopdb-images]] +*** Logs +Logs for the above [[https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/apisnoop-push-snoopdb-images/1384977461019676672][prow-logs-apisnoop-push-snoopdb-images]] +*** GCB Requires a promotion process +Some 
+information on image
--no-install-recommends \ + python3 \ + python3-dev \ + python3-pip \ + python3-wheel \ + python3-setuptools \ + jq \ + curl \ + git \ + gcc \ + libc6-dev \ + gettext-base \ + procps \ + google-cloud-sdk && \ + rm -rf /var/lib/apt/lists/* +RUN pip3 install pyasn +WORKDIR /app +COPY --from=godeps /go/bin/yq /usr/local/bin/yq +COPY ./pg-init.d /docker-entrypoint-initdb.d +COPY ./app . +ENV POSTGRES_PASSWORD=postgres diff --git a/research/asn-data-pipeline/app/add_c_ip_int_to_usage_all.sql b/research/asn-data-pipeline/app/add_c_ip_int_to_usage_all.sql new file mode 100644 index 0000000..d66497b --- /dev/null +++ b/research/asn-data-pipeline/app/add_c_ip_int_to_usage_all.sql @@ -0,0 +1 @@ +SELECT *, NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.usage_all_raw` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") diff --git a/research/asn-data-pipeline/app/add_c_ip_int_to_usage_all_no_logs.sql b/research/asn-data-pipeline/app/add_c_ip_int_to_usage_all_no_logs.sql new file mode 100644 index 0000000..826ee9a --- /dev/null +++ b/research/asn-data-pipeline/app/add_c_ip_int_to_usage_all_no_logs.sql @@ -0,0 +1 @@ +SELECT *, NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `${GCP_BIGQUERY_DATASET_LOGS}.usage_all_raw` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") diff --git a/research/asn-data-pipeline/app/distinct_c_ip_count.sql b/research/asn-data-pipeline/app/distinct_c_ip_count.sql new file mode 100644 index 0000000..22c61df --- /dev/null +++ b/research/asn-data-pipeline/app/distinct_c_ip_count.sql @@ -0,0 +1 @@ +SELECT DISTINCT c_ip, COUNT(c_ip) AS Total_Count FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.usage_all_raw` GROUP BY c_ip ORDER BY Total_Count DESC diff --git a/research/asn-data-pipeline/app/distinct_c_ip_count_logs.sql b/research/asn-data-pipeline/app/distinct_c_ip_count_logs.sql new file mode 100644 index 0000000..e13abc6 --- /dev/null +++ 
b/research/asn-data-pipeline/app/distinct_c_ip_count_logs.sql @@ -0,0 +1 @@ +SELECT DISTINCT c_ip, COUNT(c_ip) AS Total_Count FROM `${GCP_BIGQUERY_DATASET_LOGS}.usage_all_raw` GROUP BY c_ip ORDER BY Total_Count DESC diff --git a/research/asn-data-pipeline/app/distinct_ip_int.sql b/research/asn-data-pipeline/app/distinct_ip_int.sql new file mode 100644 index 0000000..22214cd --- /dev/null +++ b/research/asn-data-pipeline/app/distinct_ip_int.sql @@ -0,0 +1,2 @@ +## Get single clientip as int. +SELECT c_ip AS c_ip, NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.1_ip_count` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") diff --git a/research/asn-data-pipeline/app/distinct_ipint_only.sql b/research/asn-data-pipeline/app/distinct_ipint_only.sql new file mode 100644 index 0000000..f27e074 --- /dev/null +++ b/research/asn-data-pipeline/app/distinct_ipint_only.sql @@ -0,0 +1,2 @@ +## Get single clientip as int. +SELECT NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.1_ip_count` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") diff --git a/research/asn-data-pipeline/app/ext-ip-asn.sql b/research/asn-data-pipeline/app/ext-ip-asn.sql new file mode 100644 index 0000000..93c52c2 --- /dev/null +++ b/research/asn-data-pipeline/app/ext-ip-asn.sql @@ -0,0 +1,9 @@ +SELECT + asn as asn, + ip as cidr_ip, + ip_start as start_ip, + ip_end as end_ip, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_start)) AS start_ip_int, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_end)) AS end_ip_int + FROM `k8s-infra-ii-sandbox.${GCP_BIGQUERY_DATASET_WITH_DATE}.pyasn_ip_asn_extended` + WHERE regexp_contains(ip_start, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"); diff --git a/research/asn-data-pipeline/app/ip-from-pyasn.py b/research/asn-data-pipeline/app/ip-from-pyasn.py new file mode 100644 index 0000000..0d3e6cd --- /dev/null +++ 
b/research/asn-data-pipeline/app/ip-from-pyasn.py @@ -0,0 +1,40 @@ +## Import pyasn and csv +import pyasn +import csv +import sys + +## Set file path +asnFile = sys.argv[1] +asnDat = sys.argv[2] +pyAsnOutput = sys.argv[3] +## Open asnNumFile and read +asnNum = [line.rstrip() for line in open(asnFile, "r+")] + +## assign our dat file connection string +asndb = pyasn.pyasn(asnDat) +## Declare empty dictionary +destDict = {} +singleAsn = "" + +missingSubnets = [] +## Loop through list of asns +for singleAsn in asnNum: + ## Go look up the asn subnets (prefixes) + subnets = asndb.get_as_prefixes(singleAsn) + ## Add checking to make sure we have subnets + ## TODO: insert asn with no routes so we know which faiGCP_BIGQUERY_DATASETled without having to do a lookup + if subnets: + ## Add subnets to our dictionaries with + originAsnDict = {sbnets : singleAsn for sbnets in subnets} + ## This is what lets us append each loop to the final destDict + destDict.update(originAsnDict) + +if len(missingSubnets) > 0: + print("Subnets missing from ASNs: ", missingSubnets) + +## Open handle to output file +resultsCsv = open(pyAsnOutput, "w") +# write to csv +writer = csv.writer(resultsCsv) +for key, value in destDict.items(): + writer.writerow([key, value]) diff --git a/research/asn-data-pipeline/app/join_all_the_things.sql b/research/asn-data-pipeline/app/join_all_the_things.sql new file mode 100644 index 0000000..e1d1556 --- /dev/null +++ b/research/asn-data-pipeline/app/join_all_the_things.sql @@ -0,0 +1 @@ +SELECT time_micros, A.c_ip, c_ip_type, c_ip_region, cs_method, cs_uri, sc_status, cs_bytes, sc_bytes, time_taken_micros, cs_host, cs_referer, cs_user_agent, s_request_id, cs_operation, cs_bucket, cs_object, asn, name_with_yaml_name FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.usage_all_raw_int` AS A FULL OUTER JOIN `${GCP_BIGQUERY_DATASET_WITH_DATE}.6_ip_range_2_ip_lookup` B ON A.c_ip_int=B.c_ip diff --git a/research/asn-data-pipeline/app/join_all_the_things_no_logs.sql 
b/research/asn-data-pipeline/app/join_all_the_things_no_logs.sql new file mode 100644 index 0000000..a6a2013 --- /dev/null +++ b/research/asn-data-pipeline/app/join_all_the_things_no_logs.sql @@ -0,0 +1 @@ +SELECT time_micros, A.c_ip, c_ip_type, c_ip_region, cs_method, cs_uri, sc_status, cs_bytes, sc_bytes, time_taken_micros, cs_host, cs_referer, cs_user_agent, s_request_id, cs_operation, cs_bucket, cs_object, asn, name_with_yaml_name FROM `${GCP_BIGQUERY_DATASET_LOGS}.usage_all_raw_int` AS A FULL OUTER JOIN `${GCP_BIGQUERY_DATASET_WITH_DATE}.6_ip_range_2_ip_lookup` B ON A.c_ip_int=B.c_ip diff --git a/research/asn-data-pipeline/app/potaroo_extra_yaml_name_column.sql b/research/asn-data-pipeline/app/potaroo_extra_yaml_name_column.sql new file mode 100644 index 0000000..64084d8 --- /dev/null +++ b/research/asn-data-pipeline/app/potaroo_extra_yaml_name_column.sql @@ -0,0 +1,2 @@ +## Potaroo data with extra column for yaml name +SELECT asn, companyname, name_yaml FROM ( SELECT asn, companyname FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.potaroo_all_asn_name`) A LEFT OUTER JOIN ( SELECT asn_yaml, name_yaml FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.vendor_yaml`) B ON A.asn=B.asn_yaml diff --git a/research/asn-data-pipeline/app/potaroo_yaml_name_subbed.sql b/research/asn-data-pipeline/app/potaroo_yaml_name_subbed.sql new file mode 100644 index 0000000..00392e5 --- /dev/null +++ b/research/asn-data-pipeline/app/potaroo_yaml_name_subbed.sql @@ -0,0 +1,2 @@ +## Potaroo with company names subbed out +SELECT A.asn, A.companyname, case when name_yaml is not null then name_yaml else B.companyname end as name_with_yaml_name FROM ( SELECT asn, companyname FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.3_potaroo_with_yaml_name_column`) A LEFT JOIN ( SELECT asn, companyname, name_yaml FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.3_potaroo_with_yaml_name_column`) B ON A.asn=B.asn diff --git a/research/asn-data-pipeline/app/vendor_with_company_name.sql 
b/research/asn-data-pipeline/app/vendor_with_company_name.sql new file mode 100644 index 0000000..30a141e --- /dev/null +++ b/research/asn-data-pipeline/app/vendor_with_company_name.sql @@ -0,0 +1,2 @@ +## Add company name to vendor +SELECT A.asn, cidr_ip, start_ip, end_ip, start_ip_int, end_ip_int,name_with_yaml_name FROM ( SELECT asn, cidr_ip, start_ip, end_ip, start_ip_int, end_ip_int FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.vendor`) A LEFT OUTER JOIN ( SELECT asn, name_with_yaml_name FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}..4_potaroo_with_yaml_name_subbed`) B ON A.asn=B.asn diff --git a/research/asn-data-pipeline/asn-data.org b/research/asn-data-pipeline/asn-data.org new file mode 100644 index 0000000..3c3db1f --- /dev/null +++ b/research/asn-data-pipeline/asn-data.org @@ -0,0 +1,108 @@ +#+TITLE: ASN DATA +Process to document manually loading/transform gs bucket logs for report consumption +* Make sure we are logged in and bq points to the right datasets + +#+begin_src tmate :window prepare +gcloud auth login +#+end_src + +#+begin_src shell :results silent +gcloud config set project k8s-infra-ii-sandbox +#+end_src + +#+begin_src shell +gcloud auth list +#+end_src + +#+RESULTS: +#+begin_example + Credentialed Accounts +ACTIVE ACCOUNT +,* caleb@ii.coop +#+end_example + +Make sure the sandbox is our default project +#+begin_src shell +gcloud config list --format 'value(core.project)' 2>/dev/null +#+end_src + +#+RESULTS: +#+begin_example +k8s-infra-ii-sandbox +#+end_example + +* Load data +Lets see what we have in the bucket +tldr I can only put one wildcard on bq load and the bucket contains storage data we do not want +I realize this is amateur hour, but to confirm I get all the logs I am wildcarding the load of each type +#+begin_src tmate :window k8s-gslogs +gsutil ls gs://k8s-artifacts-gcslogs +#+end_src + +#+begin_example +gs://k8s-artifacts-gcslogs/us.artifacts.k8s-artifacts-prod.appspot.com_usage_2021_05_25_18_00_00_0bc6b45a8e79fb5d51_v0 
+gs://k8s-artifacts-gcslogs/k8s-artifacts-prod_usage_2021_05_24_22_00_00_0317b35349b09ca5d5_v0 +gs://k8s-artifacts-gcslogs/k8s-artifacts-kind_usage_2021_05_25_05_00_00_0ab0ee4ec3d7790965_v0 +gs://k8s-artifacts-gcslogs/k8s-artifacts-csi_usage_2021_05_24_23_00_00_092fb34ad7a61a8037_v0 +gs://k8s-artifacts-gcslogs/k8s-artifacts-cri-tools_usage_2021_05_25_10_00_00_070d5ffe2f0e3dfc72_v0 +gs://k8s-artifacts-gcslogs/k8s-artifacts-cni_usage_2021_05_24_22_00_00_0a72aa93793814a69b_v0 +gs://k8s-artifacts-gcslogs/asia.artifacts.k8s-artifacts-prod.appspot.com_usage_2021_05_20_23_00_00_00c9c9dd0fd526a744_v0 +gs://k8s-artifacts-gcslogs/eu.artifacts.k8s-artifacts-prod.appspot.com_usage_2021_05_20_23_00_00_09a4720ff9db2c8cd7_v0 +#+end_example + +#+begin_src tmate :window k8s-gslogs +bq load --autodetect k8s_artifacts_dataset_bb_test.usage_us_prod_kind_sci_cri_cni_asia_eu gs://k8s-artifacts-gcslogs/us.artifacts.k8s-artifacts-prod.appspot.com_usage* +#+end_src + +FAIL +There appears to be about 15k out of 10 million bad rows, I do not know what causes the bad data +I got around it by adding `--max_bad_records=20000` below +#+begin_src tmate :window k8s-gslogs +bq load --autodetect --max_bad_recprds=2000 k8s_artifacts_dataset_bb_test.usage_us_prod_kind_sci_cri_cni_asia_eu gs://k8s-artifacts-gcslogs/k8s-artifacts-prod_usage* +#+end_src + +Done +#+begin_src tmate :window k8s-gslogs +bq load --autodetect k8s_artifacts_dataset_bb_test.usage_us_prod_kind_sci_cri_cni_asia_eu gs://k8s-artifacts-gcslogs/k8s-artifacts-kind_usage* +#+end_src + +Done +#+begin_src tmate :window k8s-gslogs +bq load --autodetect k8s_artifacts_dataset_bb_test.usage_us_prod_kind_sci_cri_cni_asia_eu gs://k8s-artifacts-gcslogs/k8s-artifacts-csi_usage* +#+end_src + +Done +#+begin_src tmate :window k8s-gslogs +bq load --autodetect k8s_artifacts_dataset_bb_test.usage_us_prod_kind_sci_cri_cni_asia_eu gs://k8s-artifacts-gcslogs/k8s-artifacts-cri-tools_usage* +#+end_src + +DOne +#+begin_src tmate :window k8s-gslogs +bq load 
--autodetect k8s_artifacts_dataset_bb_test.usage_us_prod_kind_sci_cri_cni_asia_eu gs://k8s-artifacts-gcslogs/k8s-artifacts-cni_usage* +#+end_src + + +#+begin_src tmate :window k8s-gslogs +bq load --autodetect k8s_artifacts_dataset_bb_test.usage_us_prod_kind_sci_cri_cni_asia_eu gs://k8s-artifacts-gcslogs/asia.artifacts.k8s-artifacts-prod.appspot.com_usage* +#+end_src + + +#+begin_src tmate :window k8s-gslogs +bq load --autodetect k8s_artifacts_dataset_bb_test.usage_us_prod_kind_sci_cri_cni_asia_eu gs://k8s-artifacts-gcslogs/eu.artifacts.k8s-artifacts-prod.appspot.com_usage* +#+end_src +* Transformations +This gets all the columns +TODO: add table output destination +#+begin_src shell +bq query { +SELECT +DATE(TIMESTAMP_MICROS(time_micros)) AS data_date, +c_ip, +sc_status, +sc_bytes, +cs_referer, +REGEXP_EXTRACT_ALL(cs_referer,'^https://k8s.gcr.io/[^/]+/([^/]+)/' ) AS resource, +REGEXP_EXTRACT_ALL(cs_referer, r'([^$:]+$)') AS hash_num +FROM `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.usage_all` +} +#+end_src diff --git a/research/asn-data-pipeline/asn-etl-pipeline.yaml b/research/asn-data-pipeline/asn-etl-pipeline.yaml new file mode 100644 index 0000000..14c4c91 --- /dev/null +++ b/research/asn-data-pipeline/asn-etl-pipeline.yaml @@ -0,0 +1,71 @@ +# Running as a Kubernetes CronJob + +# Define the CronJob + +apiVersion: batch/v1 +kind: CronJob +metadata: + name: asn-etl-pipeline + labels: + app: asn-etl-pipeline +spec: + schedule: "*/2 * * * *" + concurrencyPolicy: Forbid + jobTemplate: + metadata: + name: asn-etl-pipeline + spec: + parallelism: 1 + backoffLimit: 0 + template: + metadata: + labels: + app: asn-etl-pipeline + spec: + restartPolicy: Never + containers: + - name: asn-etl-pipeline + image: asn-etl-pipeline + imagePullPolicy: Never + # command: + # - sleep + # - +Inf + volumeMounts: + - name: gcp-app-creds + mountPath: /etc/asn-etl-pipeline + # - name: gcp-user-account + # mountPath: /tmp/gcp-user-account/.config/gcloud + env: + - name: 
POSTGRES_PASSWORD + value: postgres + - name: GCP_PROJECT + value: k8s-infra-ii-sandbox + - name: GCP_SERVICEACCOUNT + value: asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com + - name: GCP_BIGQUERY_DATASET + value: etl_script_generated_set + - name: GCP_BIGQUERY_DATASET_LOGS + value: etl_script_generated_set_prod + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/asn-etl-pipeline/asn-etl-pipeline-gcp-sa.json + - name: ASN_DATA_PIPELINE_RETAIN + value: "false" + # - name: ASN_DATA_PIPELINE_PREINIT + # value: | + # mkdir -p /var/lib/postgresql/.config ; + # cp -r /tmp/gcp-user-account/.config/gcloud/..data/ /var/lib/postgresql/.config/gcloud/ ; + # mkdir -p /var/lib/postgresql/.config/gcloud/configurations/ ; + # cat << EOF > ~/.config/gcloud/configurations/config_default + # [core] + # project = k8s-infra-ii-sandbox + # account = bb@ii.coop + # EOF + # gsutil ls + volumes: + - name: gcp-app-creds + secret: + secretName: gcp-app-creds + # - name: gcp-user-account + # secret: + # secretName: gcp-user-account + # defaultMode: 0777 diff --git a/research/asn-data-pipeline/asn_k8s_yaml.org b/research/asn-data-pipeline/asn_k8s_yaml.org new file mode 100644 index 0000000..d2e65ea --- /dev/null +++ b/research/asn-data-pipeline/asn_k8s_yaml.org @@ -0,0 +1,302 @@ +#+TITLE: Asn K8s Yaml +Goal: Parse yaml from https://github.com/kubernetes/k8s.io/blob/main/registry.k8s.io/infra/meta/asns/ +to allow 302, redirects relationship with company to asn + +* Parsing our own yaml +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/microsoft.yaml | yq e . 
-j - +#+end_src + +#+RESULTS: +#+begin_example +{ + "name": "microsoft", + "redirectsTo": { + "registry": "kubernetes.acr.io", + "artifacts": "k8s-artifacts.acr.io" + }, + "asns": [ + 12076, + 8075, + 8068, + 8069, + 16550 + ] +} +#+end_example + +Create section per vendor so we can render the yamls +* Microsoft +#+begin_src yaml tangle (concat (getenv "HOME") "/tmp/microsoft.yaml") +name: microsoft +contacts: +- email@microsoft.com # This needs to be email we can use to raise issues with redirect. +redirectsTo: + registry: kubernetes.acr.io + artifacts: k8s-artifacts.acr.io +metadata: + ipRanges: + datasource: # link to json url + jqReformatFilter: | + . ## This will be the query to parse the above url +asns: +- 12076 +- 8075 +- 8068 +- 8069 +- 16550 +#+end_src +** Parsing k8s.io yaml +*** microsoft +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/microsoft.yaml | yq e . -j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/microsoft_yaml.csv +#+end_src + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/microsoft_yaml.csv +#+end_src + +*** google +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/google.yaml | yq e . 
-j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/google_yaml.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/google_yaml.csv +#+end_src + +*** amazon +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/amazon.yaml | yq e . -j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/amazon_yaml.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/amazon_yaml.csv +#+end_src + +*** alibabagroup +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/alibabagroup.yaml | yq e . -j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/alibabagroup_yaml.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/alibabagroup_yaml.csv +#+end_src + +*** baidu +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/baidu.yaml | yq e . 
-j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/baidu_yaml.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/baidu_yaml.csv +#+end_src + +*** digitalocean +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/digitalocean.yaml | yq e . -j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/digitalocean_yaml.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/digitalocean_yaml.csv +#+end_src + +*** equinixmetal +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/equinixmetal.yaml | yq e . -j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/equinixmetal_yaml.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/equinixmetal_yaml.csv +#+end_src + +*** huawei +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/huawei.yaml | yq e . 
-j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/huawei_yaml.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/huawei_yaml.csv +#+end_src + +*** tencentcloud +#+begin_src shell +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/tencentcloud.yaml | yq e . -j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/tencentcloud_yaml.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +#+begin_src tmate :window bq_results +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/tencentcloud_yaml.csv +#+end_src + +** Parsing mirosoft json +Still in progress +#+begin_src shell +curl 'https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_20210607.json' | jq -r \ + '.values[] | .properties.platform as $service | .properties.region as $region | .properties.addressPrefixes[] | [., $service, $region] | @csv' > /tmp/microsoft_subnet_region.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +* Google +#+begin_src yaml tangle (concat (getenv "HOME") "/tmp/google.yaml") +name: google +contacts: +- email@google.com # This needs to be email we can use to raise issues with redirect. 
+redirectsTo: + registry: k8s.gcr.io + artifacts: kubernetes-release.storage.googleapis.com +metadata: + ipRanges: + # link to json url + datasource: https://www.gstatic.com/ipranges/cloud.json + # This will be the query to parse the above url + jqReformatFilter: | + '.prefixes[] | [.ipv4Prefix, .service, .scope] | @csv' +asns: +- 39190 +- 139070 +- 45566 +- 15169 +- 19527 +- 36040 +- 43515 +- 16550 +#+end_src +** jq to get company name, ipRanges and region from amazon +The jq below is what will need to go into the yaml +#+begin_src shell +curl 'https://www.gstatic.com/ipranges/cloud.json' | jq -r '.prefixes[] | [.ipv4Prefix, .service, .scope] | @csv' > /tmp/google_raw_subnet_region.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example +#+begin_src shell +ls -al /tmp/google_raw_subnet_region.csv +#+end_src + +#+RESULTS: +#+begin_example +-rw-r--r-- 1 ii ii 19947 Jun 8 10:04 /tmp/google_raw_subnet_region.csv +#+end_example +#+begin_src shell +bq load --autodetect k8s_artifacts_dataset_bb_test.google_raw_subnet_region /tmp/google_raw_subnet_region.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +* Amazon +#+begin_src yaml +name: amazon +contacts: +- email@amazon.com # This needs to be email we can use to raise issues with redirect. +redirectsTo: + registry: kubernetes.ecr.io + artifacts: kubernetes.s3.amazon.com +metadata: + ipRanges: + # link to json url + datasource: https://ip-ranges.amazonaws.com/ip-ranges.json + # This will be the query to parse the above url + jqReformatFilter: | + '.prefixes[] | [.ip_prefix, .service, .region] | @csv' +asns: +- 16509 +#+end_src +** additional amazon asns? 
+Ask about: +#+begin_example +"AS7224", "AMAZON-AS, US" +"AS8987", "AMAZON EXPANSION, IE" +"AS10124", "AMAZON-AP-RESOURCES-AS-AP-NRT Amazon Data Services Japan KK, JP" +"AS14618", "AMAZON-AES, US" +"AS16509", "AMAZON-02, US" +"AS17493", "AMAZON-AP-RESOURCES-AS-AP-SIN Amazon Asia-Pacific Resources Private Limited, SG" +"AS38895", "AMAZON-AS-AP Amazon.com Tech Telecom, JP" +"AS52994", "SODECAM - Soc de Desenv. Cultural do Amazonas S/A, BR" +"AS58588", "AMAZON-AU Amazon Corporate Services, AU" +"AS61577", "AMAZONTEL TELECOMUNICACOES LTDA, BR" +"AS62785", "AMAZON-FC, US" +"AS135630", "AMAZON-CN Amazon Connection Technology Services (Beijing) Co., LTD, CN" +"AS262486", "PRODAM Processamento de Dados Amazonas S.A, BR" +"AS262772", "Amazonia Publicidade Ltda, BR" +"AS263639", "AMAZONAS TRIBUNAL DE JUSTICA, BR" +"AS264167", "Amazon Wifi Servicos de Internet Ltda, BR" +"AS264344", "AMAZONET TELECOMUNICACOES LTDA, BR" +"AS264509", "CONECTA AMAZONIA TELECOM LTDA. - ME, BR" +"AS266122", "Banco da Amazonia S/A, BR" +"AS266194", "AMAZONET 1 TELECOM LTDA, BR" +"AS267242", "AMAZONIA TELECOMUNICACOES LTDA, BR" +"AS269848", "UNIVERSIDAD REGIONAL AMAZONICA IKIAM, EC" +"AS271017", "AMAZONFIBER SERVICOS DE COMUNICACAO LTDA, BR" +"AS271047", "MPAM - Procuradoria-Geral de Justica do Amazonas, BR" +#+end_example +** jq to get company name, ipRanges and region from amazon provided json +#+begin_src shell +curl 'https://ip-ranges.amazonaws.com/ip-ranges.json' | jq -r '.prefixes[] | [.ip_prefix, .service, .region] | @csv' > /tmp/amazon_raw_subnet_region.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example +#+begin_src shell +ls -al /tmp/amazon_raw_subnet_region.csv +#+end_src + +#+RESULTS: +#+begin_example +-rw-r--r-- 1 ii ii 183495 Jun 8 10:02 /tmp/amazon_raw_subnet_region.csv +#+end_example +Load raw to bq +#+begin_src shell +bq load --autodetect k8s_artifacts_dataset_bb_test.amazon_raw_subnet_region /tmp/amazon_raw_subnet_region.csv +#+end_src + +#+RESULTS: +#+begin_example 
+#+end_example diff --git a/research/asn-data-pipeline/asn_pipeline_docker_file.org b/research/asn-data-pipeline/asn_pipeline_docker_file.org new file mode 100644 index 0000000..48e48c2 --- /dev/null +++ b/research/asn-data-pipeline/asn_pipeline_docker_file.org @@ -0,0 +1,795 @@ +#+TITLE: Asn_pipeline_docker_file +#+PROPERTY: header-args:sql-mode+ :comments none +#+PROPERTY: header-args:shell+ :comments none +#+PROPERTY: header-args:bash+ :comments none + +Goal: Create docker image we can ultimately run in a prow job. +This document concerns itself with the docker file I will use to get an initial docker pipeline service working + +* Outcomes definitions +k8s-infra-ii-sandbox:etl_staging.all_asn_company_outer_join + A table that contains the following columns + - asn + - company_name + - cidr_ip + - start_ip + - end_ip + - name_yaml + - redirectsToRegistry + - redirectsToArtifacts + - region + - website + - email +* Files to process data sources + - Get ans from the yaml on k8s.io + https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn_k8s_yaml.org + - Get asn, datacenter from vendor json + https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn_k8s_yaml.org + - Get asn2company names from potaroo.net + https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_company_table.org + - Get asn metadata (email, website) from peeringdb + https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_metadata_table.org + - Get ans vendor (asn2iprange) from pyasn + https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_vendor_table.org +* Other asn files out of scope here, but important to remember + - Loading data from the prod logs to get clientip + https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn-data.org + - Mapping the ranges from the asn2iprange set to client_ip + https://github.com/ii/org/blob/main/research/asn-data-pipeline/match-ip-to-ip-range.org +* Concerns + - I have never run the 
above 4 together, also part of the vendor creation process specifically + converting string_ip to int uses bq to do the conversion, I do not want to over complicate the loading. + +* Lets get to it +** Container image + +Define the image +#+begin_src dockerfile :tangle ./Dockerfile :comments none +FROM golang:1.16.5-stretch as godeps +RUN go get -u github.com/mikefarah/yq/v4 && \ + test -f /go/bin/yq + +FROM postgres:12.7 +RUN apt-get update && \ + apt-get install -y curl && \ + echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" \ + | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ + curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + python3 \ + python3-dev \ + python3-pip \ + python3-wheel \ + python3-setuptools \ + jq \ + curl \ + git \ + gcc \ + libc6-dev \ + gettext-base \ + procps \ + google-cloud-sdk && \ + rm -rf /var/lib/apt/lists/* +RUN pip3 install pyasn +WORKDIR /app +COPY --from=godeps /go/bin/yq /usr/local/bin/yq +COPY ./pg-init.d /docker-entrypoint-initdb.d +COPY ./app . +ENV POSTGRES_PASSWORD=postgres +#+end_src + +Build the image +#+begin_src tmate :window asn-etl +docker build -t asn-etl-pipeline . +#+end_src + +* Discuss architecture with Caleb. +- build container image + - img that has + - PG + - Python + - BGP, PG, BQ + - Script + +- Job + - based on image + - runs script + + + +* Shell script +** Pre-condition for shell +*** TODO +- I am going to allow application use for my gcloud creds on this box +- Set peeringdb_user, peeringdb_password +- Update peeringdb config to go to postgres db +- Make sure pg_USR/PW is set +- Make sql scripts to run, how do I invoke? +- Running directory? 
+ +*** Gcloud +Log into gs cloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC + +Set default project +#+BEGIN_SRC shell :results silent +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC + +I need to configure my application-default-credentials +#+BEGIN_SRC tmate :window prepare +gcloud auth application-default login +#+END_SRC + +** Setting up an ServiceAccount in GCP for the pipeline +Create the ServiceAccount +#+begin_src shell :results silent +gcloud iam service-accounts create asn-etl \ + --display-name="asn-etl" \ + --description="A Service Account used for ETL with ASN data" +#+end_src +(this will/is be replaced by Terraform) + +Assign the role to the ServiceAccount +#+begin_src shell :prologue "(\n" :epilogue ") 2>&1 ; :" +GCP_PROJECT=k8s-infra-ii-sandbox +GCP_SERVICEACCOUNT="asn-etl@${GCP_PROJECT}.iam.gserviceaccount.com" +ROLES=( + roles/bigquery.user + roles/bigquery.dataEditor + roles/bigquery.dataOwner + roles/ +) + +CURRENT_IAM_POLICIES=$(gcloud projects get-iam-policy "${GCP_PROJECT}" \ + --flatten="bindings[].members" \ + --format='table(bindings.role)' \ + --filter="bindings.members:${GCP_SERVICEACCOUNT}" \ + | tail +2) + +for ROLE in ${ROLES[*]}; do + echo "# Checking role: '${ROLE}'" + if echo "${CURRENT_IAM_POLICIES}" | grep -q -E "(^| )${ROLE}( |$)"; then + echo "# Role '${ROLE}' already exists" + else + gcloud projects add-iam-policy-binding "${GCP_PROJECT}" \ + --member="serviceAccount:${GCP_SERVICEACCOUNT}" \ + --role="${ROLE}" + echo "# Added role '${ROLE}'" + fi +done +while IFS= read -r ROLE; do + echo "${ROLES[*]}" | grep -q -E "(^| )${ROLE}( |$)" + INCLUDES_IN_DECLARATION=$? + if [ ! 
${INCLUDES_IN_DECLARATION} -eq 0 ]; then + gcloud projects remove-iam-policy-binding "${GCP_PROJECT}" \ + --member="serviceAccount:${GCP_SERVICEACCOUNT}" \ + --role="${ROLE}" + echo "# Role '${ROLE}' has been removed" + fi +done < <(echo "${CURRENT_IAM_POLICIES}") +#+end_src + +#+RESULTS: +#+begin_example +# Checking role: 'roles/bigquery.user' +# Role 'roles/bigquery.user' already exists +# Checking role: 'roles/bigquery.dataEditor' +# Role 'roles/bigquery.dataEditor' already exists +# Checking role: 'roles/bigquery.dataOwner' +# Role 'roles/bigquery.dataOwner' already exists +#+end_example + +Get iam policies +#+begin_src shell +gcloud projects get-iam-policy k8s-infra-ii-sandbox \ + --flatten="bindings[].members" \ + --format='table(bindings.role)' \ + --filter="bindings.members:asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com" \ + | tail +2 | xargs +#+end_src + +#+RESULTS: +#+begin_example +roles/bigquery.dataEditor roles/bigquery.dataOwner roles/bigquery.user +#+end_example + +Ensure bucket permissions +#+begin_src shell :prologue "(\n" :epilogue ") 2>&1 ; :" +GCP_PROJECT=k8s-infra-ii-sandbox +GCP_BUCKET_NAME=ii_bq_scratch_dump +gsutil iam ch "serviceAccount:asn-etl@${GCP_PROJECT}.iam.gserviceaccount.com:legacyBucketWriter" "gs://${GCP_BUCKET_NAME}" +#+end_src + +** Local testing +Remove all existing keys +#+begin_src shell :prologue "(\n" :epilogue ")\n2>&1 ; :" :results silent +for KEY in $(gcloud iam service-accounts keys list --iam-account=asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com | tail +2 | awk '{print $1}'); do + yes | gcloud iam service-accounts keys delete "${KEY}" --iam-account=asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com +done +#+end_src +(there appears to be a limit of about 10 or so keys per service-account) + +Generate a key file for ServiceAccount auth +#+begin_src shell :results silent +gcloud iam service-accounts keys create /tmp/asn-etl-pipeline-gcp-sa.json 
--iam-account=asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com +#+end_src + +Change key permissions +#+begin_src shell :results silent +sudo chown 999 /tmp/asn-etl-pipeline-gcp-sa.json +#+end_src + +Test it out with local work +#+begin_src tmate :window asn-etl +TMP_DIR_ETL=$(mktemp -d) +sudo chmod 0777 "${TMP_DIR_ETL}" +docker run \ + -it \ + --rm \ + -e TZ=$TZ \ + -e POSTGRES_PASSWORD="postgres" \ + -e GOOGLE_APPLICATION_CREDENTIALS=/tmp/asn-etl-pipeline-gcp-sa.json \ + -e GCP_PROJECT=k8s-infra-ii-sandbox \ + -e GCP_SERVICEACCOUNT=asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com \ + -e GCP_BIGQUERY_DATASET=etl_script_generated_set \ + -v /tmp/asn-etl-pipeline-gcp-sa.json:/tmp/asn-etl-pipeline-gcp-sa.json \ + -v "${PWD}/pg-init.d:/docker-entrypoint-initdb.d" \ + -v "${TMP_DIR_ETL}:/tmp" \ + -v "${PWD}/app:/app" \ + asn-etl-pipeline +echo "${TMP_DIR_ETL}" +#+end_src + +Test it out normally +#+begin_src tmate :window asn-etl +TMP_DIR_ETL=$(mktemp -d) +sudo chmod 0777 "${TMP_DIR_ETL}" +docker run \ + -it \ + --rm \ + -e TZ=$TZ \ + -e POSTGRES_PASSWORD="postgres" \ + -e GOOGLE_APPLICATION_CREDENTIALS=/tmp/asn-etl-pipeline-gcp-sa.json \ + -e GCP_PROJECT=k8s-infra-ii-sandbox \ + -e GCP_SERVICEACCOUNT=asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com \ + -e GCP_BIGQUERY_DATASET=etl_script_generated_set \ + -v /tmp/asn-etl-pipeline-gcp-sa.json:/tmp/asn-etl-pipeline-gcp-sa.json:ro \ + -v "${TMP_DIR_ETL}:/tmp" \ + asn-etl-pipeline +echo "${TMP_DIR_ETL}" + # -e GCP_BIGQUERY_DATASET_LOGS=etl_script_generated_set_prod \ +#+end_src + +** Postgres init files + +*** IP from PyASN +Given PyASN data, query the ASN data from the resulting /.dat/ file +#+begin_src python :tangle ./app/ip-from-pyasn.py :comments none +## Import pyasn and csv +import pyasn +import csv +import sys + +## Set file path +asnFile = sys.argv[1] +asnDat = sys.argv[2] +pyAsnOutput = sys.argv[3] +## Open asnNumFile and read +asnNum = [line.rstrip() for line in open(asnFile, "r+")] + +## assign our 
dat file connection string +asndb = pyasn.pyasn(asnDat) +## Declare empty dictionary +destDict = {} +singleAsn = "" + +missingSubnets = [] +## Loop through list of asns +for singleAsn in asnNum: + ## Go look up the asn subnets (prefixes) + subnets = asndb.get_as_prefixes(singleAsn) + ## Add checking to make sure we have subnets + ## TODO: insert asn with no routes so we know which faiGCP_BIGQUERY_DATASETled without having to do a lookup + if subnets: + ## Add subnets to our dictionaries with + originAsnDict = {sbnets : singleAsn for sbnets in subnets} + ## This is what lets us append each loop to the final destDict + destDict.update(originAsnDict) + +if len(missingSubnets) > 0: + print("Subnets missing from ASNs: ", missingSubnets) + +## Open handle to output file +resultsCsv = open(pyAsnOutput, "w") +# write to csv +writer = csv.writer(resultsCsv) +for key, value in destDict.items(): + writer.writerow([key, value]) +#+end_src + +*** Get Dependencies +Create a dataset, ensure that the local data is ready for parsing (from Potaroo) +#+BEGIN_SRC shell :tangle ./pg-init.d/00-get-dependencies.sh +#!/bin/bash +set -x + +set -eo pipefail +eval "${ASN_DATA_PIPELINE_PREINIT:-}" + +PARENTPID=$(ps -o ppid= -p $$) +echo MY PID :: $$ +echo PARENT PID :: $PARENTPID +ps aux + +cat << EOF > $HOME/.bigqueryrc +credential_file = ${GOOGLE_APPLICATION_CREDENTIALS} +project_id = ${GCP_PROJECT} +EOF + +gcloud config set project "${GCP_PROJECT}" + +## This is just to continue testing wile I wait for permissions for the service account +## Use the activate-service-account live once it has permissions +## The container is being run it so it should let me manually do the auth +# gcloud auth login +gcloud auth activate-service-account "${GCP_SERVICEACCOUNT}" --key-file="${GOOGLE_APPLICATION_CREDENTIALS}" + +gcloud auth list + +## GET ASN_COMAPNY section +## using https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_company_table.org +## This will pull a fresh copy, I 
prefer to use what we have in gs +# curl -s https://bgp.potaroo.net/cidr/autnums.html | sed -nre '/AS[0-9]/s/.*as=([^&]+)&.*">([^<]+)<\/a> ([^,]+), (.*)/"\1", "\3", "\4"/p' | head + +bq ls +# Remove the previous data set +bq rm -r -f "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" || true + +# initalise a new data set with the given name +bq mk \ + --dataset \ + --description "etl pipeline dataset for ASN data from CNCF supporting vendors of k8s infrastructure" \ + "${GCP_PROJECT}:${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" + +if [ ! -f "/tmp/potaroo_data.csv" ]; then + gsutil cp gs://ii_bq_scratch_dump/potaroo_company_asn.csv /tmp/potaroo_data.csv +fi + +# Strip data to only return ASN numbers +cat /tmp/potaroo_data.csv | cut -d ',' -f1 | sed 's/"//' | sed 's/"//'| cut -d 'S' -f2 | tail +2 > /tmp/potaroo_asn.txt + +cat /tmp/potaroo_data.csv | tail +2 | sed 's,^AS,,g' > /tmp/potaroo_asn_companyname.csv + +## GET PYASN section +## using https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_vendor_table.org + +## pyasn installs its utils in ~/.local/bin/* +## Add pyasn utils to path (dockerfile?) +## full list of RIB files on ftp://archive.routeviews.org//bgpdata/2021.05/RIBS/ +cd /tmp +if [ ! -f "rib.latest.bz2" ]; then + pyasn_util_download.py --latest + mv rib.*.*.bz2 rib.latest.bz2 +fi +## Convert rib file to .dat we can process +if [ ! 
-f "ipasn_latest.dat" ]; then + pyasn_util_convert.py --single rib.latest.bz2 ipasn_latest.dat +fi +## Run the py script we are including in the docker image +python3 /app/ip-from-pyasn.py /tmp/potaroo_asn.txt ipasn_latest.dat /tmp/pyAsnOutput.csv +## This will output pyasnOutput.csv +#+END_SRC + +*** Migrate Schemas +SQL for migrating the database +#+begin_src sql-mode :tangle ./pg-init.d/01-migrate-schemas.sql +begin; + +create table if not exists cust_ip ( + c_ip bigint not null +); + +create table if not exists vendor_expanded_int ( + asn text, + cidr_ip cidr, + start_ip inet, + end_ip inet, + start_ip_int bigint, + end_ip_int bigint, + name_with_yaml_name varchar +); + +create table company_asn ( + asn varchar, + name varchar +); +create table pyasn_ip_asn ( + ip cidr, + asn int +); +create table asnproc ( + asn bigint not null primary key +); + +create table peeriingdbnet ( + data jsonb +); + +create table peeriingdbpoc ( + data jsonb +); + +commit; +#+end_src + +*** Load PyASN data into Postgres +Load ASN data into local Postgrse for processing +#+begin_src sql-mode :tangle ./pg-init.d/02-load-pyasn-output.sql +copy company_asn from '/tmp/potaroo_data.csv' delimiter ',' csv; +copy pyasn_ip_asn from '/tmp/pyAsnOutput.csv' delimiter ',' csv; + +-- Split subnet into start and end +select + asn as asn, + ip as ip, + host(network(ip)::inet) as ip_start, + host(broadcast(ip)::inet) as ip_end +into + table pyasn_ip_asn_extended +from pyasn_ip_asn; + +-- Copy the results to cs +copy (select * from pyasn_ip_asn_extended) to '/tmp/pyasn_expanded_ipv4.csv' csv header; +#+end_src + +*** Load into BigQuery dataset and prepare vendor data +Query for loading extended IP ASN ranges into BigQuery +#+begin_src sql-mode :tangle ./app/ext-ip-asn.sql +SELECT + asn as asn, + ip as cidr_ip, + ip_start as start_ip, + ip_end as end_ip, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_start)) AS start_ip_int, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_end)) AS end_ip_int + FROM 
`k8s-infra-ii-sandbox.${GCP_BIGQUERY_DATASET_WITH_DATE}.pyasn_ip_asn_extended` + WHERE regexp_contains(ip_start, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"); +#+end_src + +Load vendor data with ASNs into BigQuery +#+begin_src shell :tangle ./pg-init.d/03-load-into-a-bigquery-dataset.sh +## Load csv to bq +bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).potaroo_all_asn_name" /tmp/potaroo_asn_companyname.csv asn:integer,companyname:string + +## Load a copy of the potaroo_data to bq +# https://github.com/ii/org/blob/main/research/asn-data-pipeline/match-ip-to-ip-range.org +bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).pyasn_ip_asn_extended" /tmp/pyasn_expanded_ipv4.csv asn:integer,ip:string,ip_start:string,ip_end:string + +## Lets go convert the beginning and end into ints +export GCP_BIGQUERY_DATASET_WITH_DATE="${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" +envsubst < /app/ext-ip-asn.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).vendor" + +mkdir -p /tmp/vendor + +VENDORS=( + microsoft + google + amazon + alibabagroup + baidu + digitalocean + equinixmetal + huawei + tencentcloud +) +## This should be the end of pyasn section, we have results table that covers start_ip/end_ip from fs our requirements +## GET k8s asn yaml using: +## https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn_k8s_yaml.org +## Lets create csv's to import +for VENDOR in ${VENDORS[*]}; do + curl -s "https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/${VENDOR}.yaml" \ + | yq e . -j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [. 
,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' \ + > "/tmp/vendor/${VENDOR}_yaml.csv" + bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).vendor_yaml" "/tmp/vendor/${VENDOR}_yaml.csv" asn_yaml:integer,name_yaml:string,redirectsToRegistry:string,redirectsToArtifacts:string +done + +ASN_VENDORS=( + amazon + google + microsoft +) + +## GET Vendor YAML +## https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn_k8s_yaml.org +## TODO: Make this a loop that goes through dates to find a working URL +## curl "https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_$(date --date='-2 days' +%Y%m%d).json" \ +curl "https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_20210906.json" \ + | jq -r '.values[] | .properties.platform as $service | .properties.region as $region | .properties.addressPrefixes[] | [., $service, $region] | @csv' \ + > /tmp/vendor/microsoft_raw_subnet_region.csv +curl 'https://www.gstatic.com/ipranges/cloud.json' \ + | jq -r '.prefixes[] | [.ipv4Prefix, .service, .scope] | @csv' \ + > /tmp/vendor/google_raw_subnet_region.csv +curl 'https://ip-ranges.amazonaws.com/ip-ranges.json' \ + | jq -r '.prefixes[] | [.ip_prefix, .service, .region] | @csv' \ + > /tmp/vendor/amazon_raw_subnet_region.csv + +## Load all the csv +for VENDOR in ${ASN_VENDORS[*]}; do + bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).vendor_json" "/tmp/vendor/${VENDOR}_raw_subnet_region.csv" ipprefix:string,service:string,region:string +done + +mkdir -p /tmp/peeringdb-tables +PEERINGDB_TABLES=( + net + poc +) +for PEERINGDB_TABLE in ${PEERINGDB_TABLES[*]}; do + curl -sG "https://www.peeringdb.com/api/${PEERINGDB_TABLE}" | jq -c '.data[]' | sed 's,",\",g' > "/tmp/peeringdb-tables/${PEERINGDB_TABLE}.json" +done + +# /tmp/potaroo_asn.txt + +## placeholder for sql we will need to import asn_only from +#+end_src + +*** Load and combine PeeringDB + 
Potaroo ASN data +Prepare ASN data with company names +#+begin_src sql-mode :tangle ./pg-init.d/04-load-asn-data.sql +copy asnproc from '/tmp/potaroo_asn.txt'; + +copy peeriingdbnet (data) from '/tmp/peeringdb-tables/net.json' csv quote e'\x01' delimiter e'\x02'; +copy peeriingdbpoc (data) from '/tmp/peeringdb-tables/poc.json' csv quote e'\x01' delimiter e'\x02'; + +copy ( + select distinct asn.asn, + (net.data ->> 'name') as "name", + (net.data ->> 'website') as "website", + (poc.data ->> 'email') as email + from asnproc asn + left join peeriingdbnet net on (cast(net.data::jsonb ->> 'asn' as bigint) = asn.asn) + left join peeriingdbpoc poc on ((poc.data ->> 'name') = (net.data ->> 'name')) +-- where (net.data ->>'website') is not null +-- where (poc.data ->> 'email') is not null + order by email asc) to '/tmp/peeringdb_metadata_prepare.csv' csv header; +#+end_src + +*** Load PeeringDB + Potaroo data into BigQuery +Load ASN data with company names into BigQuery +#+begin_src shell :tangle ./pg-init.d/05-bq-load-metadata.sh +## Load output to bq +tail +2 /tmp/peeringdb_metadata_prepare.csv > /tmp/peeringdb_metadata.csv + +bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).metadata" /tmp/peeringdb_metadata.csv asn:integer,name:string,website:string,email:string +#+end_src + +*** Load Kubernetes public artifact traffic logs into BigQuery from GCS bucket +Load logs of usage data +#+begin_src shell :tangle ./pg-init.d/06-bq-load-logs.sh +## Load logs to bq +if [ -z "${GCP_BIGQUERY_DATASET_LOGS:-}" ]; then + echo "Using dataset logs, since \$GCP_BIGQUERY_DATASET_LOGS was provided and set to '$GCP_BIGQUERY_DATASET_LOGS'" + BUCKETS=( + asia.artifacts.k8s-artifacts-prod.appspot.com + eu.artifacts.k8s-artifacts-prod.appspot.com + k8s-artifacts-cni + k8s-artifacts-cri-tools + k8s-artifacts-csi + k8s-artifacts-gcslogs + k8s-artifacts-kind + k8s-artifacts-prod + us.artifacts.k8s-artifacts-prod.appspot.com + ) + for BUCKET in ${BUCKETS[*]}; do + bq load --autodetect 
--max_bad_records=2000 ${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).usage_all_raw gs://k8s-infra-artifacts-gcslogs/${BUCKET}_usage* || true + done +fi +#+end_src + +*** Create tables in BigQuery for use in DataStudio dashboard +Prepare BQ Query for distinct IP count +#+begin_src sql-mode :tangle ./app/distinct_c_ip_count.sql +SELECT DISTINCT c_ip, COUNT(c_ip) AS Total_Count FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.usage_all_raw` GROUP BY c_ip ORDER BY Total_Count DESC +#+end_src + +Prepare BQ Query for distinct IP count from logs +#+begin_src sql-mode :tangle ./app/distinct_c_ip_count_logs.sql +SELECT DISTINCT c_ip, COUNT(c_ip) AS Total_Count FROM `${GCP_BIGQUERY_DATASET_LOGS}.usage_all_raw` GROUP BY c_ip ORDER BY Total_Count DESC +#+end_src + +Prepare BQ Query for distinct IPs as int64 with ip as string +#+begin_src shell :tangle ./app/distinct_ip_int.sql +## Get single clientip as int. +SELECT c_ip AS c_ip, NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.1_ip_count` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") +#+end_src + +Prepare BQ Query for distinct IP count +#+begin_src shell :tangle ./app/distinct_ipint_only.sql +## Get single clientip as int. 
+SELECT NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.1_ip_count` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") +#+end_src + +Prepare BQ Query for sourcing ASN data from the ASN provider Potaroo +#+begin_src shell :tangle ./app/potaroo_extra_yaml_name_column.sql +## Potaroo data with extra column for yaml name +SELECT asn, companyname, name_yaml FROM ( SELECT asn, companyname FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.potaroo_all_asn_name`) A LEFT OUTER JOIN ( SELECT asn_yaml, name_yaml FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.vendor_yaml`) B ON A.asn=B.asn_yaml +#+end_src + +Prepare BQ Query for sourcing ASN data from the ASN provider Potaroo without company name +#+begin_src shell :tangle ./app/potaroo_yaml_name_subbed.sql +## Potaroo with company names subbed out +SELECT A.asn, A.companyname, case when name_yaml is not null then name_yaml else B.companyname end as name_with_yaml_name FROM ( SELECT asn, companyname FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.3_potaroo_with_yaml_name_column`) A LEFT JOIN ( SELECT asn, companyname, name_yaml FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.3_potaroo_with_yaml_name_column`) B ON A.asn=B.asn +#+end_src + +Prepare BQ Query for identifying vendors with company names +#+begin_src shell :tangle ./app/vendor_with_company_name.sql +## Add company name to vendor +SELECT A.asn, cidr_ip, start_ip, end_ip, start_ip_int, end_ip_int,name_with_yaml_name FROM ( SELECT asn, cidr_ip, start_ip, end_ip, start_ip_int, end_ip_int FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.vendor`) A LEFT OUTER JOIN ( SELECT asn, name_with_yaml_name FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}..4_potaroo_with_yaml_name_subbed`) B ON A.asn=B.asn +#+end_src + +Run the above sql to do some more transformations +#+begin_src shell :tangle ./pg-init.d/07_bq_usage_data_transformation.sh +## Get single clientip as int. 
+export GCP_BIGQUERY_DATASET_WITH_DATE="${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" +if [ -n "${GCP_BIGQUERY_DATASET_LOGS:-}" ]; then + envsubst < /app/distinct_c_ip_count_logs.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.1_ip_count" +else + envsubst < /app/distinct_c_ip_count.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.1_ip_count" +fi +envsubst < /app/distinct_ip_int.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.2_ip_int" +envsubst < /app/distinct_ipint_only.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.2a_ip_int" +envsubst < /app/potaroo_extra_yaml_name_column.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.3_potaroo_with_yaml_name_column" +envsubst < /app/potaroo_yaml_name_subbed.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.4_potaroo_with_yaml_name_subbed" +envsubst < /app/vendor_with_company_name.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.5_vendor_with_company_name" +#+end_src + +*** Export unique IPs into a local file from BigQuery +Grab all distinct clientips +#+begin_src shell :tangle ./pg-init.d/08_download_c_ip_int.sh +## Set a timestamp to work with +TIMESTAMP=$(date +%Y%m%d%H%M) +echo $TIMESTAMP > /tmp/my-timestamp.txt +## Dump the entire table to gcs +bq extract \ +--destination_format CSV \ +${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).2a_ip_int \ +gs://ii_bq_scratch_dump/2a_ip_inti-$TIMESTAMP-*.csv +## Download the files +TIMESTAMP=$(cat /tmp/my-timestamp.txt | tr -d '\n') +mkdir -p /tmp/usage_all_ip_only/ +gsutil cp \ +gs://ii_bq_scratch_dump/2a_ip_inti-$TIMESTAMP-*.csv \ +/tmp/usage_all_ip_only/ +## Merge the data +cat /tmp/usage_all_ip_only/*.csv | tail +2 > /tmp/usage_all_ip_only_1.csv 
+cat /tmp/usage_all_ip_only_1.csv | grep -v c_ip_int > /tmp/usage_all_ip_only.csv +#+end_src + +*** Export ASN data with company names into local file from BigQuery +Download our expanded load_vendor for local processing +#+begin_src shell :tangle ./pg-init.d/09_download_expanded_ips.sh +## Set a timestamp to work with +TIMESTAMP=$(date +%Y%m%d%H%M) +echo $TIMESTAMP > /tmp/my-timestamp.txt +## Dump the entire table to gcs +bq extract \ +--destination_format CSV \ +${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).5_vendor_with_company_name \ +gs://ii_bq_scratch_dump/vendor-$TIMESTAMP-*.csv +## Download the files +TIMESTAMP=$(cat /tmp/my-timestamp.txt | tr -d '\n') +mkdir -p /tmp/expanded_pyasn/ +gsutil cp \ +gs://ii_bq_scratch_dump/vendor-$TIMESTAMP-*.csv \ +/tmp/expanded_pyasn/ +## Merge the data +cat /tmp/expanded_pyasn/*.csv | tail +2 > /tmp/expanded_pyasn_1.csv +cat /tmp/expanded_pyasn_1.csv | grep -v cidr_ip > /tmp/expanded_pyasn.csv +#+end_src + +*** Prepare local data of IP to IP range in Postgres +Copy in the tables, add some indexes and create a dump based on cross join +#+begin_src sql-mode :tangle ./pg-init.d/10-load-single-ip-int.sql +-- Copy the customer ip in +copy cust_ip from '/tmp/usage_all_ip_only.csv'; +-- Copy pyasn expanded in +copy vendor_expanded_int from '/tmp/expanded_pyasn.csv' (DELIMITER(',')); +-- Indexes on the Data we are about to range +create index on vendor_expanded_int (end_ip_int); +create index on vendor_expanded_int (start_ip_int); +create index on cust_ip (c_ip); + +copy ( SELECT vendor_expanded_int.cidr_ip, vendor_expanded_int.start_ip, vendor_expanded_int.end_ip, vendor_expanded_int.asn, vendor_expanded_int.name_with_yaml_name, cust_ip.c_ip FROM vendor_expanded_int, cust_ip WHERE cust_ip.c_ip >= vendor_expanded_int.start_ip_int AND cust_ip.c_ip <= vendor_expanded_int.end_ip_int) TO '/tmp/match-ip-to-iprange.csv' CSV HEADER; +#+end_src + +*** Load the table that matches IP to IP range into BigQuery from local file +Load table for 
matching IP to IP range to BigQuery +#+begin_src shell :tangle ./pg-init.d/11-upload-ip-range-2-ip.sh +bq load --autodetect ${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).6_ip_range_2_ip_lookup /tmp/match-ip-to-iprange.csv +#+end_src + +*** Create BigQuery table to pull out IP as integer from raw usage logs +Prepare BQ Query for associating IP count to usage data +#+begin_src shell :tangle ./app/add_c_ip_int_to_usage_all.sql +SELECT *, NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.usage_all_raw` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") +#+end_src + +Prepare BQ Query for associating IP count to usage data from logs +#+begin_src shell :tangle ./app/add_c_ip_int_to_usage_all_no_logs.sql +SELECT *, NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `${GCP_BIGQUERY_DATASET_LOGS}.usage_all_raw` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") +#+end_src + +Perform creation of the BQ table based on the IP count and usage data association +#+begin_src shell :tangle ./pg-init.d/12_add_c_ip_int_to_usage_all.sh +export GCP_BIGQUERY_DATASET_WITH_DATE="${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" +if [ -n "${GCP_BIGQUERY_DATASET_LOGS:-}" ]; then + envsubst < /app/add_c_ip_int_to_usage_all_no_logs.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).usage_all_raw_int" +else + envsubst < /app/add_c_ip_int_to_usage_all.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).usage_all_raw_int" +fi +#+end_src + +*** Connect all the data in the dataset of BigQuery together +Prepare BQ Query for connecting all the tables +#+begin_src shell :tangle ./app/join_all_the_things.sql +SELECT time_micros, A.c_ip, c_ip_type, c_ip_region, cs_method, cs_uri, sc_status, cs_bytes, sc_bytes, time_taken_micros, cs_host, cs_referer, cs_user_agent, s_request_id, cs_operation, cs_bucket, 
cs_object, asn, name_with_yaml_name FROM `${GCP_BIGQUERY_DATASET_WITH_DATE}.usage_all_raw_int` AS A FULL OUTER JOIN `${GCP_BIGQUERY_DATASET_WITH_DATE}.6_ip_range_2_ip_lookup` B ON A.c_ip_int=B.c_ip +#+end_src + +Prepare BQ Query for connecting all the tables from logs +#+begin_src shell :tangle ./app/join_all_the_things_no_logs.sql +SELECT time_micros, A.c_ip, c_ip_type, c_ip_region, cs_method, cs_uri, sc_status, cs_bytes, sc_bytes, time_taken_micros, cs_host, cs_referer, cs_user_agent, s_request_id, cs_operation, cs_bucket, cs_object, asn, name_with_yaml_name FROM `${GCP_BIGQUERY_DATASET_LOGS}.usage_all_raw_int` AS A FULL OUTER JOIN `${GCP_BIGQUERY_DATASET_WITH_DATE}.6_ip_range_2_ip_lookup` B ON A.c_ip_int=B.c_ip +#+end_src + +Perform creating of BQ table based on connecting all the tables +#+begin_src shell :tangle ./pg-init.d/13_prepare_final_table.sh +## Get single clientip as int. +export GCP_BIGQUERY_DATASET_WITH_DATE="${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" +if [ -n "${GCP_BIGQUERY_DATASET_LOGS:-}" ]; then + envsubst < /app/join_all_the_things_no_logs.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).7_asn_company_c_ip_lookup" +else + envsubst < /app/join_all_the_things.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).7_asn_company_c_ip_lookup" +fi +#+end_src + +*** Override the existing data used in the DataStudio report +Copy and promote tables in BQ dataset from current run to production +#+begin_src shell :tangle ./pg-init.d/14-promote-bq-dataset-as-prod.sh +for TABLE in $(bq ls ${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d) | awk '{print $1}' | tail +3 | xargs); do + echo "Removing table '${GCP_BIGQUERY_DATASET}.$TABLE'" + bq rm -f "${GCP_BIGQUERY_DATASET}.$TABLE"; + echo "Copying table '${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).$TABLE' to '${GCP_BIGQUERY_DATASET}.$TABLE'" + bq cp --noappend_table --nono_clobber -f "${GCP_BIGQUERY_DATASET}_$(date 
+%Y%m%d).$TABLE" "${GCP_BIGQUERY_DATASET}.$TABLE"; +done +#+end_src + +*** Stop Postgres +Complete the running job +#+begin_src shell :tangle ./pg-init.d/15-stop-postgres.sh +if [ ! "${ASN_DATA_PIPELINE_RETAIN:-}" = true ]; then + # in the Postgres container image, + # the command run changes to "postgres" once it's completed loading up + # and is in a ready state and all of the init scripts have run + # + # here we wait for that state and attempt to exit cleanly, without error + ( + # discover where the postgres process is, even if Prow has injected a PID 1 process + PARENTPID=$(ps -o ppid= -p $$ | awk '{print $1}') + echo MY PID :: $$ + echo PARENT PID :: $PARENTPID + PID=$$ + if [ ! "$(cat /proc/$PARENTPID/cmdline)" = "/tools/entrypoint" ] && [ ! $PARENTPID -eq 0 ]; then + PID=$PARENTPID + fi + ps aux + until [ "$(cat /proc/$PID/cmdline | tr '\0' '\n' | head -n 1)" = "postgres" ]; do + sleep 1s + done + # exit Postgres with a code of 0 + pg_ctl kill QUIT $PID + ) & +fi +#+end_src diff --git a/research/asn-data-pipeline/deploying.org b/research/asn-data-pipeline/deploying.org new file mode 100644 index 0000000..55a0af5 --- /dev/null +++ b/research/asn-data-pipeline/deploying.org @@ -0,0 +1,192 @@ +#+TITLE: Deploying + +The ETL Pipeline is based off the /postgres:12.7-buster/ container image. 
+ +* Variables and configuration + +| Name | Description | Default | +|----------------------------------+----------------------------------------------------------------------------------------------------------+---------| +| ~ASN_DATA_PIPELINE_RETAIN~ | Keep the Postgres instance up | | +| ~GOOGLE_APPLICATION_CREDENTIALS~ | Used by the /gcloud/ command, the value represents a local JSON file containing credentials for GCP auth | ~""~ | +| ~GCP_PROJECT~ | The GCP project to use | ~""~ | +| ~GCP_SERVICEACCOUNT~ | The GCP ServiceAccount to use | ~""~ | +| ~GCP_BIGQUERY_DATASET~ | The GCP BigQuery Dataset to use | ~""~ | + +Also note the variables inherited from the Postgres image, [[https://github.com/docker-library/docs/blob/master/postgres/README.md#environment-variables][here]]. + +* Running as a Kubernetes CronJob + +Define the CronJob +#+begin_src yaml :tangle ./asn-etl-pipeline.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: asn-etl-pipeline + labels: + app: asn-etl-pipeline +spec: + schedule: "*/2 * * * *" + concurrencyPolicy: Forbid + jobTemplate: + metadata: + name: asn-etl-pipeline + spec: + parallelism: 1 + backoffLimit: 0 + template: + metadata: + labels: + app: asn-etl-pipeline + spec: + restartPolicy: Never + containers: + - name: asn-etl-pipeline + image: asn-etl-pipeline + imagePullPolicy: Never + # command: + # - sleep + # - +Inf + volumeMounts: + - name: gcp-app-creds + mountPath: /etc/asn-etl-pipeline + # - name: gcp-user-account + # mountPath: /tmp/gcp-user-account/.config/gcloud + env: + - name: POSTGRES_PASSWORD + value: postgres + - name: GCP_PROJECT + value: k8s-infra-ii-sandbox + - name: GCP_SERVICEACCOUNT + value: asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com + - name: GCP_BIGQUERY_DATASET + value: etl_script_generated_set + - name: GCP_BIGQUERY_DATASET_LOGS + value: etl_script_generated_set_prod + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/asn-etl-pipeline/asn-etl-pipeline-gcp-sa.json + - name: 
ASN_DATA_PIPELINE_RETAIN + value: "false" + # - name: ASN_DATA_PIPELINE_PREINIT + # value: | + # mkdir -p /var/lib/postgresql/.config ; + # cp -r /tmp/gcp-user-account/.config/gcloud/..data/ /var/lib/postgresql/.config/gcloud/ ; + # mkdir -p /var/lib/postgresql/.config/gcloud/configurations/ ; + # cat << EOF > ~/.config/gcloud/configurations/config_default + # [core] + # project = k8s-infra-ii-sandbox + # account = bb@ii.coop + # EOF + # gsutil ls + volumes: + - name: gcp-app-creds + secret: + secretName: gcp-app-creds + # - name: gcp-user-account + # secret: + # secretName: gcp-user-account + # defaultMode: 0777 +#+end_src + +Create the Secret +#+begin_src shell :results silent +kubectl create secret generic gcp-app-creds \ + --from-file=asn-etl-pipeline-gcp-sa.json=/tmp/asn-etl-pipeline-gcp-sa.json \ + --dry-run=client \ + -o yaml \ + | kubectl apply -f - +#+end_src + +Create the Secret, from local gcloud user creds +#+begin_src shell :results silent +cd "${HOME}/.config/gcloud" +kubectl create secret generic gcp-user-account \ + --from-file="." 
\ + --dry-run=client \ + -o yaml \ + | kubectl apply -f - +#+end_src + +Deploy the CronJob +#+begin_src shell :results silent +kubectl apply -f ./asn-etl-pipeline.yaml +#+end_src + +Get logs +#+begin_src tmate :window asn-etl +kubectl logs -l app=asn-etl-pipeline --prefix -f +#+end_src + +Delete the CronJob +#+begin_src shell :results silent +kubectl delete -f ./asn-etl-pipeline.yaml +#+end_src + +* Run as ProwJob +In Prow Config +#+begin_src yaml +periodics: + - interval: 5m + agent: kubernetes + name: asn-data-pipeline + decorate: true + spec: + containers: + - name: asn-etl-pipeline + image: asn-etl-pipeline + command: + - /usr/local/bin/docker-entrypoint.sh + - postgres + imagePullPolicy: Never + volumeMounts: + - name: gcp-app-creds + mountPath: /etc/asn-etl-pipeline + env: + - name: GCP_PROJECT + value: k8s-infra-ii-sandbox + - name: GCP_SERVICEACCOUNT + value: asn-etl@k8s-infra-ii-sandbox.iam.gserviceaccount.com + - name: GCP_BIGQUERY_DATASET + value: etl_script_generated_set + - name: GCP_BIGQUERY_DATASET_LOGS + value: etl_script_generated_set_prod + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/asn-etl-pipeline/asn-etl-pipeline-gcp-sa.json + volumes: + - name: gcp-app-creds + secret: + secretName: gcp-app-creds +#+end_src + +Create the Secret +#+begin_src shell :results silent +kubectl -n prow-workloads create secret generic gcp-app-creds \ + --from-file=asn-etl-pipeline-gcp-sa.json=<(sudo cat /tmp/asn-etl-pipeline-gcp-sa.json) \ + --dry-run=client \ + -o yaml \ + | kubectl apply -f - +#+end_src + +Update the local =cncf-infra/prow-config/.sharing.io/prow-config-template.yaml= with the periodics contents above, then +rerunning the =cncf-infra/prow-config/.sharing.io/init= script, +#+begin_src shell :results silent +$INIT_DEFAULT_REPOS_FOLDER/cncf-infra/prow-config/.sharing.io/init +#+end_src + +Restart Prow Components +#+begin_src shell +kubectl -n prow rollout restart $(kubectl -n prow get deployments -o=jsonpath='{range 
.items[*]}{.kind}/{.metadata.name} {end}') +#+END_SRC + +#+RESULTS: +#+begin_example +deployment.apps/prow-crier restarted +deployment.apps/prow-deck restarted +deployment.apps/prow-ghproxy restarted +deployment.apps/prow-hook restarted +deployment.apps/prow-horologium restarted +deployment.apps/prow-minio restarted +deployment.apps/prow-plank restarted +deployment.apps/prow-sinker restarted +deployment.apps/prow-statusreconciler restarted +deployment.apps/prow-tide restarted +#+end_example diff --git a/research/asn-data-pipeline/etl_asn_company_table.org b/research/asn-data-pipeline/etl_asn_company_table.org new file mode 100644 index 0000000..2632721 --- /dev/null +++ b/research/asn-data-pipeline/etl_asn_company_table.org @@ -0,0 +1,188 @@ +#+TITLE: Etl_asn_company_table +Goal: get the most definitive Company ASN match from: +https://bgp.potaroo.net/cidr/autnums.html + +* Get asn to company list +TODO: this section needs to be replaced with: +curl -s https://bgp.potaroo.net/cidr/autnums.html | sed -nre '/AS[0-9]/s/.*as=([^&]+)&.*">([^<]+)<\/a> ([^"]+)/"\1", "\3"/p' +I tested it and it still failed on one line, I did not troubleshoot, but this would be a way better way to get the data +That said, the solution using beautiful soup still works + +** curl the data +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME")) + mkdir autonums + cd autonums +# wget https://bgp.potaroo.net/cidr/autnums.html +curl -s https://bgp.potaroo.net/cidr/autnums.html | sed -nre '/AS[0-9]/s/.*as=([^&]+)&.*">([^<]+)<\/a> ([^"]+)/"\1", "\3"/p > autnums_sed.csv +#+END_SRC +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/autonums") +cat autnums.html +#+END_SRC +** Suggestion from zz is to use the beautiful soup library from python +I am going to work through https://zetcode.com/python/beautifulsoup/#:~:text=BeautifulSoup%20is%20a%20Python%20library,%2C%20navigable%20string%2C%20or%20comment. 
+I will work in the autonums dir + +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/autonums") +sudo pip3 install lxml +#+END_SRC +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/autonums") +sudo pip3 install bs4 +#+END_SRC +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/autonums") +cat autnums.html | head +#+END_SRC + +#+BEGIN_SRC python tmate :window python +#print('Please wait') +return 'A line of text.\n' +#+END_SRC + +#+RESULTS: +#+begin_example +/home/ii/ii/org/research +#+end_example +*** run python in-line, FAIL +Python in org is not working, something related to list +Error: if: Symbol’s value as variable is void: eshell-modules-list +Skipping troubleshooting this, just going to run it as a script in tmate +#+BEGIN_SRC python :dir (concat (getenv "HOME") "/autonums") :results output +from bs4 import BeautifulSoup +print ('start') +with open('/home/ii/autonums/autnums.html', 'r') as f: + contents = f.read() + soup = BeautifulSoup(contents, 'lxml') + print(soup.head) +print ('end') +#+END_SRC +Even very basic command that works in a old box fails +#+BEGIN_SRC python tmate :window python +#print('Please wait') +return 'A line of text.\n'.rstrip() +#+END_SRC + +*** run python in script WORKS +You have to tangle this file out with ctrl-c, ctrl-v, t +NOTE, that command will write files for all tangle blocks in the document +If you dont want this on to write to disk again just comment it out. 
+#+BEGIN_SRC python :tangle (concat (getenv "HOME") "/autonums/testing_soup.py") +#!/usr/bin/python +from bs4 import BeautifulSoup +with open('/home/ii/autonums/autnums.html', 'r') as f: + contents = f.read() + soup = BeautifulSoup(contents, 'lxml') + print(soup.head) +#+END_SRC +This runs the above to produce output +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/autonums") +chmod +x testing_soup.py +python testing_soup.py +#+END_SRC +** Build the script using beautiful soup +#+BEGIN_SRC python :tangle (concat (getenv "HOME") "/autonums/soup.py") +#!/usr/bin/python +from bs4 import BeautifulSoup +with open('/home/ii/autonums/autnums.html', 'r') as input_file: + contents = input_file.read() + soup = BeautifulSoup(contents, 'lxml') +# printn(soup.a) +for tag in soup.find_all('a'): + asn = (f'"{tag.text}, {tag.next_sibling}"') + # print(asn) + results_file = open("asn_company_results.csv", "a") + results_file.write(asn) + results_file.write("\n") + # print >>results_file, asn + results_file.close() +input_file.close() +#+END_SRC +** Run script, clean up output. +TODO: Replace entire polling of this date, it could be done in 3 lines by somebody comfortable with regex. +This runs the above to produce output, +It will fail unless you run the sed cleanup block below. #+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/autonums") +chmod +x soup.py +python soup.py +#+END_SRC +Little cleanup of the output +ok this is GROSSS! I know I know, don't judge me +Obviously I just dealt with each import failure one at a time till it worked +Stephen is taking a look to see if he can clean some of this up to be a bit more elegant +Thank you Stephen!! 
+#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/autonums") +sed -i 's/,//2' asn_company_results.csv +sed -i 's/,//2' asn_company_results.csv +sed -i 's/,//2' asn_company_results.csv +sed -i 's/,//2' asn_company_results.csv +sed -i 's/,//2' asn_company_results.csv +sed -i 's/,//2' asn_company_results.csv +sed -i 's/,//2' asn_company_results.csv +sed -i 's/\.,//' asn_company_results.csv +sed -i 's/\s\+,/\,/' asn_company_results.csv +sed -i 's/,\s\+/,\"/' asn_company_results.csv +sed -i 's/,/\",/' asn_company_results.csv +sed -i 's/CT-CNGI China telecom AC\/AEUR.*/CT-CNGI China telecom AC\/AEURA/g' asn_company_results.csv +sed -i 's/IRKUT_IAP-AS.*/IRKUT_IAP-AS/g' asn_company_results.csv +sed -i '/^\"$/d' asn_company_results.csv +sed -i 's/$/\"/' asn_company_results.csv +#+END_SRC +** Import to postgres +I stand up a postgres instance in the peeringdb section +If you need one go look in peeringdb to see the command to start one. +#+BEGIN_SRC sql-mode +-- adding this table to match wat caleb used +--create table asnproc (asn varchar, name varchar); +\COPY asnproc from '/home/ii/autonums/asn_company_results.csv' DELIMITER ',' CSV; +--create table company_asn (asn varchar, name varchar); +\COPY company_asn from '/home/ii/autonums/asn_company_results.csv' DELIMITER ',' CSV; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +#+end_SRC +#+BEGIN_SRC sql-mode +--select * from company_asn limit 10; +select * from asnproc limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +#+end_SRC +** Export and upload to gbucket + +#+begin_src sql-mode +\copy (select * from asnproc) to '~/peeringdb_company_asn.csv' csv header; +#+end_src + +#+RESULTS: +#+begin_SRC example +COPY 181219 +#+end_SRC +Login to gcloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC + +Set the project +#+BEGIN_SRC tmate :window prepare +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC + +#+BEGIN_SRC shell :results silent +gsutil cp ~/peeringdb_company_asn.csv 
gs://ii_bq_scratch_dump/peeringdb_company_asn.csv +#+END_SRC +#+BEGIN_SRC shell +gsutil ls gs://ii_bq_scratch_dump/ +#+END_SRC + +#+RESULTS: +#+begin_example +gs://ii_bq_scratch_dump/asn-data.csv +gs://ii_bq_scratch_dump/haha.csv +gs://ii_bq_scratch_dump/haha.json +gs://ii_bq_scratch_dump/image-tag-hash.csv +gs://ii_bq_scratch_dump/ip-and-asn.json +gs://ii_bq_scratch_dump/peeringdb-dump-20210512.sql +gs://ii_bq_scratch_dump/peeringdb-dump-20210603.sql +gs://ii_bq_scratch_dump/peeringdb_company_asn.csv +gs://ii_bq_scratch_dump/resource_and_hash_distinct_list.json +#+end_example diff --git a/research/asn-data-pipeline/etl_asn_metadata_table.org b/research/asn-data-pipeline/etl_asn_metadata_table.org new file mode 100644 index 0000000..dff1006 --- /dev/null +++ b/research/asn-data-pipeline/etl_asn_metadata_table.org @@ -0,0 +1,530 @@ +#+TITLE: Etl_asn_metadata_table +Goal is to get ASN metadata from peeringdb +* Desired output: +- ASN +- Company name +- Company email +- Company website + +* Get company to asn list +Process for generating this list is captured in asn-data-pipeline/etl_asn_company_table.org +#+BEGIN_SRC shell :results silent +gsutil cp gs://ii_bq_scratch_dump/peeringdb_company_asn.csv ~/peeringdb_company_asn.csv +#+END_SRC +* Peeringdb - setup, exploration +I found a very cool new way to look at the peeringdb data +[https://www.peeringdb.com/api/](https://www.peeringdb.com/api/) +Looks like direct access to the data on ix, ixlan, net, netfac, and org +sadly it does confirm that we only have 22 320 records + `curl -sG https://www.peeringdb.com/api/net --data-urlencode fields=id | jq '.data | length`' +22320 +** Parse from peeringdb using Postgres + +Bring up Postgres +#+BEGIN_SRC tmate :window postgres +docker run -it --rm -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_DB=peeringdb postgres:12.2-alpine +#+END_SRC + +Clone https://git.2e8.dk/peeringdb-simplesync +#+BEGIN_SRC tmate :window prepare :dir (getenv "HOME") +git clone 
https://git.2e8.dk/peeringdb-simplesync +cd peeringdb-simplesync +#+END_SRC + +Set psql creds +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +export \ + PGUSER=postgres \ + PGPASSWORD=password +#+END_SRC + +import the schema +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +psql -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP < schema.sql +#+END_SRC + +Enter PeeringDB creds ( you will need valid credentials for peeringdb.com ) +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +read -p 'PEERINGDB_USER : ' PEERINGDB_USER +#+END_SRC +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +read -p 'PEERINGDB_PASSWORD: ' PEERINGDB_PASSWORD +#+END_SRC + +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +export PEERINGDB_USER PEERINGDB_PASSWORD +#+END_SRC + +Write the config for sync.py +#+BEGIN_SRC python :tangle (concat (getenv "HOME") "/peeringdb-simplesync/config.py") +from requests.auth import HTTPBasicAuth +import os + +host=os.environ['SHARINGIO_PAIR_LOAD_BALANCER_IP'] +user=os.environ['PEERINGDB_USER'] +password=os.environ['PEERINGDB_PASSWORD'] + +def get_config(): + return { + 'db_conn_str': 'dbname=peeringdb host=%s user=postgres password=password' % host, + 'db_schema': 'peeringdb', + 'auth': HTTPBasicAuth(user, password) + } +#+END_SRC + +Dump all of the data +I had to install psycopg2 +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +pip install psycopg2-binary +#+END_SRC +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +python3 ./sync.py +#+END_SRC + +** Create a new dump +After running the above Dump the database +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +pg_dump -U postgres 
-d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP > peeringdb-dump-$(date +%Y%m%d).sql +#+END_SRC +Upload the dump +#+BEGIN_SRC tmate :window peeringdb-sync +gsutil cp peeringdb-dump-$(date +%Y%m%d).sql gs://ii_bq_scratch_dump/peeringdb-dump-$(date +%Y%m%d).sql +#+END_SRC + +** Stand up local peeringdb with pre-prepared dump +Download from the bucket +#+BEGIN_SRC tmate :window peeringdb-sync +gsutil cp gs://ii_bq_scratch_dump/peeringdb-dump-20210512.sql ./peeringdb-dump-20210512.sql +#+END_SRC + +Load the data from the dump into a new/separate Postgres instance +#+BEGIN_SRC tmate :window peeringdb-sync +psql -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP < ./peeringdb-dump-20210512.sql +#+END_SRC +** Peeringdb Schema exploration +There is a full schema explorationon on: https://github.com/ii/org/blob/main/research/asn-to-company-peeringdb-data/asn-to-company-peeringdb-data.org#schema-exploration +Api docs on: https://www.peeringdb.com/apidocs/ +Quick review of the scehma: +#+begin_SRC example + schemaname | tablename +------------+----------- + peeringdb | fac + peeringdb | ix + peeringdb | ixfac + peeringdb | ixlan + peeringdb | ixpfx + peeringdb | net + peeringdb | netfac + peeringdb | netixlan + peeringdb | org + peeringdb | poc +(10 rows) + +#+end_SRC + +The only tables I care about for this document is: peeringdb.net and peeringdb.poc + +*** peeringdb.net + +#+BEGIN_SRC sql-mode +select * from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | org_id | asn | status | data | created | updated | deleted 
+----+--------+------+--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 83 | 3152 | 5388 | ok | {"id": 83, "aka": "", "asn": 5388, "name": "Cable&Wireless UK", "notes": "This is former Energis Communications UK backbone network (AS5388) which is now owned by Cable and Wireless.\r\n\r\nAS5388 have no direct peering relations any longer, for peering request please contact our backbone AS1273 peering team.\r\n\r\nCable and Wireless global backbone network (AS1273) has a separate PeeringDB entry.\r\n", "org_id": 3152, "status": "ok", "created": "2004-08-03T10:30:54Z", "updated": "2016-03-14T20:23:33Z", "website": "http://www.cw.com/uk", "info_ipv6": false, "info_type": "NSP", "name_long": "", "info_ratio": "Balanced", "info_scope": "Regional", "irr_as_set": "AS-ENERGIS", 
"policy_url": "", "poc_updated": "2020-01-22T04:24:08Z", "info_traffic": "10-20Gbps", "info_unicast": true, "policy_ratio": false, "route_server": "", "looking_glass": "http://as5388.net/cgi-bin/lg.pl", "info_multicast": false, "info_prefixes4": 30, "info_prefixes6": 2, "netfac_updated": "2016-03-14T21:24:34Z", "policy_general": "Restrictive", "allow_ixp_update": false, "netixlan_updated": null, "policy_contracts": "Not Required", "policy_locations": "Not Required", "info_never_via_route_servers": false} | 2004-08-03 10:30:54+00 | 2016-03-14 20:23:33+00 | +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select id, org_id, status, data::jsonb ->> 'asn' as asn, data::jsonb ->> 'name' as name, data::jsonb ->> 'website' as website from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | org_id | status | asn | name | website +----+--------+--------+------+-------------------+---------------------- + 83 | 3152 | ok | 5388 | Cable&Wireless UK | http://www.cw.com/uk +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select count(data::jsonb ->> 'asn') from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 23095 +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.net limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + { + + "id": 83, + + "aka": "", + + "asn": 5388, + + "name": "Cable&Wireless UK", + + "notes": "This is former Energis Communications UK backbone network (AS5388) which is now owned by Cable and Wireless.\r\n\r\nAS5388 have no direct peering relations any longer, for peering request please contact our 
backbone AS1273 peering team.\r\n\r\nCable and Wireless global backbone network (AS1273) has a separate PeeringDB entry.\r\n",+ + "org_id": 3152, + + "status": "ok", + + "created": "2004-08-03T10:30:54Z", + + "updated": "2016-03-14T20:23:33Z", + + "website": "http://www.cw.com/uk", + + "info_ipv6": false, + + "info_type": "NSP", + + "name_long": "", + + "info_ratio": "Balanced", + + "info_scope": "Regional", + + "irr_as_set": "AS-ENERGIS", + + "policy_url": "", + + "poc_updated": "2020-01-22T04:24:08Z", + + "info_traffic": "10-20Gbps", + + "info_unicast": true, + + "policy_ratio": false, + + "route_server": "", + + "looking_glass": "http://as5388.net/cgi-bin/lg.pl", + + "info_multicast": false, + + "info_prefixes4": 30, + + "info_prefixes6": 2, + + "netfac_updated": "2016-03-14T21:24:34Z", + + "policy_general": "Restrictive", + + "allow_ixp_update": false, + + "netixlan_updated": null, + + "policy_contracts": "Not Required", + + "policy_locations": "Not Required", + + "info_never_via_route_servers": false + + } + { + + "id": 24, + + "aka": "Extreme Telecom", + + "asn": 19817, + + "name": "DSLExtreme", + + "notes": "", + + "org_id": 62, + + "status": "ok", + + "created": "2004-07-28T00:00:00Z", + + "updated": "2016-03-14T20:47:30Z", + + "website": "http://www.dslextreme.com", + + "info_ipv6": false, + + "info_type": "Cable/DSL/ISP", + + "name_long": "", + + "info_ratio": "Mostly Inbound", + + "info_scope": "Regional", + + "irr_as_set": "", + + "policy_url": "", + + "poc_updated": "2016-03-14T21:35:12Z", + + "info_traffic": "1-5Gbps", + + "info_unicast": true, + + "policy_ratio": false, + + "route_server": "", + + "looking_glass": "", + + "info_multicast": false, + + "info_prefixes4": 69, + + "info_prefixes6": 3, + + "netfac_updated": "2016-03-14T20:33:54Z", + + "policy_general": "Open", + + "allow_ixp_update": false, + + "netixlan_updated": "2021-05-12T00:13:00.764215Z", + + "policy_contracts": "Not Required", + + "policy_locations": "Not Required", + + 
"info_never_via_route_servers": false + + } +(2 rows) + +#+end_SRC + +*** peeringdb.poc +#+BEGIN_SRC sql-mode +select * from peeringdb.poc limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | net_id | status | data | created | updated | deleted +-----+--------+--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 100 | 115 | ok | {"id": 100, "url": "", "name": "Telefonica DE Peering Team", "role": "Policy", "email": "peering.de@telefonica.com", "phone": "", "net_id": 115, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-05-20T13:55:47Z", "visible": "Public"} | 2010-07-29 00:00:00+00 | 2016-05-20 13:55:47+00 | +(1 row) + +#+end_SRC + + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.poc limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + { + + "id": 100, + + "url": "", + + "name": "Telefonica DE Peering Team",+ + "role": "Policy", + + "email": "peering.de@telefonica.com",+ + "phone": "", + + "net_id": 115, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2016-05-20T13:55:47Z", + + "visible": "Public" + + } + { + + "id": 48, + + "url": "", + + "name": "NOC", + + "role": "NOC", + + "email": "noc@stealth.net", + + "phone": "+12122322020", + + "net_id": 26, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2020-05-20T23:14:22Z", + + "visible": "Public" + + } + +#+end_SRC + +* Get company to asn list - delete heading below +Log into gcloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC +Set the project +#+BEGIN_SRC tmate :window prepare +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC +#+BEGIN_SRC shell :results silent +gsutil cp gs://ii_bq_scratch_dump/potaroo_company_asn.csv 
~/potaroo_company_asn.csv +#+END_SRC +#+BEGIN_SRC shell :results silent +gsutil cp gs://ii_bq_scratch_dump/peeringdb_company_asn.csv gs://ii_bq_scratch_dump/potaroo_company_asn.csv +#+END_SRC +#+BEGIN_SRC shell +#gsutil ls gs://ii_bq_scratch_dump/ +ls -al ~/potaroo_company_asn.csv +#+END_SRC + +#+RESULTS: +#+begin_example +-rw-r--r-- 1 ii ii 6668250 Jun 8 05:38 /home/ii/potaroo_company_asn.csv +#+end_example + +** Building asn-ip list with Postgres (this requires import of an asn list) +#+BEGIN_SRC sql-mode +create schema asntocompany; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +CREATE SCHEMA +#+end_SRC +Split asn from dump so we have table with only asns, will name that one `asnproc` +#+BEGIN_SRC tmate :window autonums :dir (getenv "HOME") +cat /home/ii/potaroo_company_asn.csv | cut -d ',' -f1 | sed 's/"//' | sed 's/"//'| cut -d 'S' -f2 >> asns_only.txt +#+END_SRC + +#+BEGIN_SRC sql-mode + create table asnproc ( + asn bigint not null primary key + ); +\copy asnproc from '/home/ii/autonums/asns_only.txt'; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +#+end_SRC +** Trying a few queries to see what I see +#+BEGIN_SRC sql-mode +select (net.data ->> 'name') as "name", + asn + from peeringdb.net + where (net.data ->> 'name') ilike '%google%' + limit 5; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + name | asn +--------------------+------- + Google LLC | 15169 + Google LLC AS19527 | 19527 + Google LLC AS36040 | 36040 + Google LLC AS43515 | 43515 + Google Fiber, Inc. 
| 16591 +(5 rows) + +#+end_SRC + +We only have 23k asns +#+BEGIN_SRC sql-mode +select count(*) + from peeringdb.net + where (net.data ->> 'asn') is not null; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 23097 +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select count(*) +from peeringdb.poc p +where (p.data ->> 'email') is not null; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 10756 +(1 row) + +#+end_SRC + + +#+BEGIN_SRC sql-mode +select + (poc.data ->> 'name') as poc_name +from peeringdb.poc poc +-- left join peeringdb.poc poc on ((net.data ->>'name') = (poc.data ->>'name')) +where (poc.data ->> 'name') ilike '%google%' +or (poc.data ->> 'name') ilike '%amazon%' +or (poc.data ->> 'name') ilike '%microsoft%'; +-- where (net.data ->>'name') ilike '%google%'; +-- select data from peeringdb.net where (data ->> 'asn')::bigint = 21789 limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + poc_name +----------------------------------- + noc@google.com + Google Fiber NOC + Diretoria de Tecnologia Amazontel + Diretoria de Tecnologia Amazontel + Ganesh Wakode Google Mail +(5 rows) + +#+end_SRC + +#+BEGIN_SRC sql-mode +begin; +-- create table asnproc ( +-- asn bigint not null primary key +-- ); +-- \copy asnproc from '/home/ii/peeringdb-simplesync/asns.txt'; +select count(*) from peeringdb.poc; +select net.id, + asnproc.asn, + (net.data ->> 'name') as "name", + (net.data ->> 'website') as "website" + -- (poc.data ->> 'email') as email + from asnproc + join peeringdb.net net on ((net.data ->> 'asn')::bigint = asnproc.asn) + -- left join peeringdb.poc poc on ((poc.data ->> 'name') = 'chonkers') + -- left join peeringdb.poc poc on ((poc.data ->> 'name') = (net.data ->> 'name')) + -- where (net.data ->>'website') is not null + -- order by email asc + limit 5; +rollback; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +BEGIN + count +------- + 34255 +(1 row) + +#+end_SRC + + +* Getting the metadata from peeringdb +It looks like this is the 
best info I can get from this database +There are not that many entries in this database, I am not super impressed with the info, it will probably end up being more supplimental for regional registrars +I am going to skip peeringdb for the vendor table (asn, name, subnet, start_ip, end_ip ) it simply does not have enough data to contribute +I will use shadowserver and pyasn for that table and peeringdb for the metadata table (asn, name, email, website) + +Use this when we create the metadata tables +###### This is the best I can find for asn, name, website, email########### +#+BEGIN_SRC sql-mode +select distinct asn.asn, + (net.data ->> 'name') as "name", + (net.data ->> 'website') as "website", + (poc.data ->> 'email') as email + into asn_name_web_email + from asnproc asn + left join peeringdb.net net on (net.asn = asn.asn) + left join peeringdb.poc poc on ((poc.data ->> 'name') = (net.data ->> 'name')) + -- where (net.data ->>'website') is not null + -- where (poc.data ->> 'email') is not null + order by email asc limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + asn | name | website | email +--------+------------------------+----------------------------------+------------------------------- + 268729 | Wnett Fibra | http://www.wnettfibra.com.br | + 131916 | BAYNET | http://www.baynet.ne.jp/ | BAYNET-peer@tokyobaynet.co.jp + 206161 | Nils Steinger | https://voidptr.de | NST24-RIPE@voidptr.de + 206161 | Nils Steinger | https://voidptr.de | RIPE-abuse@voidptr.de + 53113 | AGYONET | http://WWW.AGYONET.COM.BR | SUPORTEADM@AGYONET.COM.BR + 269501 | OneTech Telecom | http://www.onetechtelecom.com.br | abuse2@onetechtelecom.com.br + 213261 | Sebastian-Wilhelm Graf | http://sebastian-graf.at | abuse@AS213261.net + 213126 | Andreas Fries | | abuse@afries.ch + 196865 | Aircomm S.r.l. 
| http://www.aircomm.it | abuse@aircomm.it + 208266 | Hanqi Yang | https://network.alanyhq.com/ | abuse@alanyhq-global.net +(10 rows) + +#+end_SRC + + +#+RESULTS: + + + +#+BEGIN_SRC sql-mode +select id, org_id, status, data::jsonb ->> 'asn' as asn, data::jsonb ->> 'name' as name, data::jsonb ->> 'website' as website from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | org_id | status | asn | name | website +----+--------+--------+------+-------------------+---------------------- + 83 | 3152 | ok | 5388 | Cable&Wireless UK | http://www.cw.com/uk +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select count(data) from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 23095 +(1 row) + +#+end_SRC diff --git a/research/asn-data-pipeline/etl_asn_vendor_table.org b/research/asn-data-pipeline/etl_asn_vendor_table.org new file mode 100644 index 0000000..8852875 --- /dev/null +++ b/research/asn-data-pipeline/etl_asn_vendor_table.org @@ -0,0 +1,600 @@ +#+TITLE: Build_vendor_table +Goal is to combine ip-asn data from 3 sources +- peeringdb +- shadowserver.org +- pyasn +* Desired ouput +- ASN +- Company name +- subnet +- start_ip +- end_ip +- start_ip_int +- end_ip_int +* Get company to asn list - delete heading below + +#+BEGIN_SRC shell :results silent +gsutil cp ~/potaroo_company_asn.csv gs://ii_bq_scratch_dump/potaroo_company_asn.csv +#+END_SRC +#+BEGIN_SRC shell +gsutil ls -al gs://ii_bq_scratch_dump/potaroo_company_asn.csv +#+END_SRC + +#+RESULTS: +#+begin_example + 3334138 2021-06-08T00:10:05Z gs://ii_bq_scratch_dump/potaroo_company_asn.csv#1623111005815246 metageneration=1 +TOTAL: 1 objects, 3334138 bytes (3.18 MiB) +#+end_example +#+BEGIN_SRC shell +cat potaroo_company_asn.csv | wc -l +#+END_SRC + +#+RESULTS: +#+begin_example +0 +#+end_example + +* Get asn to company list +TODDO: this section needs to be replaced with: +curl -s https://bgp.potaroo.net/cidr/autnums.html | sed -nre 
'/AS[0-9]/s/.*as=([^&]+)&.*">([^<]+)<\/a> ([^"]+)/"\1", "\3"/p' +I tested it and it still failed on one line, I did not troubleshoot, but this would be a way better way to get the data + +I found quite a few, but the most complete list at this time is: +https://bgp.potaroo.net/cidr/autnums.html +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME")) +# mkdir autonums +# cd autonums +# wget https://bgp.potaroo.net/cidr/autnums.html +curl -s https://bgp.potaroo.net/cidr/autnums.html | sed -nre '/AS[0-9]/s/.*as=([^&]+)&.*">([^<]+)<\/a> ([^"]+)/"\1", "\3"/p' > ~/autnums_sed.csv +#+END_SRC +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/") +cat ~/autnums_sed.csv | head +#+END_SRC + + +** Import to postgres +I stand up a postgres instance in the peeringdb section +If you need one go look in peeringdb to see the command to start one. +#+BEGIN_SRC sql-mode +-- adding this table to match what caleb used +-- create table asnproc (asn varchar, name varchar); +-- \COPY asnproc from '/home/ii/autonums/asn_company_results.csv' DELIMITER ',' CSV; +-- create table company_asn (asn varchar, name varchar); +\COPY company_asn from '/home/ii/autonums_sed.csv' DELIMITER ',' CSV; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +#+end_SRC +#+BEGIN_SRC sql-mode +--select * from company_asn limit 10; +select * from company_asn limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +#+end_SRC + +* Peeringdb - skipping open fold to see logic +tldr; peeringdb only has 22 500 asn's we have 2 other sources with way more. 
+I will rather use peeringdb to get asn metadata until we get access to arin and other registrars +I found a very cool new way to look at the peeringdb data +[https://www.peeringdb.com/api/](https://www.peeringdb.com/api/) +Looks like direct access to the data on ix, ixlan, net, netfac, and org +sadly it does confirm that we only have 22 320 records + `curl -sG https://www.peeringdb.com/api/net --data-urlencode fields=id | jq '.data | length'` +22320 +** Parse from peeringdb using Postgres + +Bring up Postgres +#+BEGIN_SRC tmate :window postgres +docker run -it --rm -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_DB=peeringdb postgres:12.2-alpine +#+END_SRC + +Clone https://git.2e8.dk/peeringdb-simplesync +#+BEGIN_SRC tmate :window prepare :dir (getenv "HOME") +git clone https://git.2e8.dk/peeringdb-simplesync +cd peeringdb-simplesync +#+END_SRC + +Set psql creds +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +export \ + PGUSER=postgres \ + PGPASSWORD=password +#+END_SRC + +import the schema +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +psql -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP < schema.sql +#+END_SRC + +Enter PeeringDB creds ( you will need valid credentials for peeringdb.com ) +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +read -p 'PEERINGDB_USER : ' PEERINGDB_USER +#+END_SRC +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +read -p 'PEERINGDB_PASSWORD: ' PEERINGDB_PASSWORD +#+END_SRC + +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +export PEERINGDB_USER PEERINGDB_PASSWORD +#+END_SRC + +Write the config for sync.py +#+BEGIN_SRC python :tangle (concat (getenv "HOME") "/peeringdb-simplesync/config.py") +from requests.auth import HTTPBasicAuth +import os + 
+host=os.environ['SHARINGIO_PAIR_LOAD_BALANCER_IP'] +user=os.environ['PEERINGDB_USER'] +password=os.environ['PEERINGDB_PASSWORD'] + +def get_config(): + return { + 'db_conn_str': 'dbname=peeringdb host=%s user=postgres password=password' % host, + 'db_schema': 'peeringdb', + 'auth': HTTPBasicAuth(user, password) + } +#+END_SRC + +Dump all of the data +I had to install psycopg2 +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +pip install psycopg2-binary +#+END_SRC +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +python3 ./sync.py +#+END_SRC + +** Create a new dump +After running the above Dump the database +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +pg_dump -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP > peeringdb-dump-$(date +%Y%m%d).sql +#+END_SRC +Upload the dump +#+BEGIN_SRC tmate :window peeringdb-sync +gsutil cp peeringdb-dump-$(date +%Y%m%d).sql gs://ii_bq_scratch_dump/peeringdb-dump-$(date +%Y%m%d).sql +#+END_SRC + +** Stand up local peeringdb with pre-prepared dump +Download from the bucket +#+BEGIN_SRC tmate :window peeringdb-sync +gsutil cp gs://ii_bq_scratch_dump/peeringdb-dump-20210512.sql ./peeringdb-dump-20210512.sql +#+END_SRC + +Load the data from the dump into a new/separate Postgres instance +#+BEGIN_SRC tmate :window peeringdb-sync +psql -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP < ./peeringdb-dump-20210512.sql +#+END_SRC +** Peeringdb Schema exploration +There is a full schema explorationon on: https://github.com/ii/org/blob/main/research/asn-to-company-peeringdb-data/asn-to-company-peeringdb-data.org#schema-exploration +Api docs on: https://www.peeringdb.com/apidocs/ +Quick review of the scehma: +#+begin_SRC example + schemaname | tablename +------------+----------- + peeringdb | fac + peeringdb | ix + peeringdb | ixfac + peeringdb | ixlan + peeringdb | ixpfx + 
peeringdb | net + peeringdb | netfac + peeringdb | netixlan + peeringdb | org + peeringdb | poc +(10 rows) + +#+end_SRC + +The only tables I care about for this document is: peeringdb.net and peeringdb.poc + +*** peeringdb.net + +#+BEGIN_SRC sql-mode +select * from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | org_id | asn | status | data | created | updated | deleted +----+--------+------+--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 83 | 3152 | 5388 | ok | {"id": 83, "aka": "", "asn": 5388, "name": "Cable&Wireless UK", "notes": "This is former Energis Communications UK backbone network (AS5388) which is now owned by Cable and Wireless.\r\n\r\nAS5388 have no direct peering relations any longer, for peering request please contact our 
backbone AS1273 peering team.\r\n\r\nCable and Wireless global backbone network (AS1273) has a separate PeeringDB entry.\r\n", "org_id": 3152, "status": "ok", "created": "2004-08-03T10:30:54Z", "updated": "2016-03-14T20:23:33Z", "website": "http://www.cw.com/uk", "info_ipv6": false, "info_type": "NSP", "name_long": "", "info_ratio": "Balanced", "info_scope": "Regional", "irr_as_set": "AS-ENERGIS", "policy_url": "", "poc_updated": "2020-01-22T04:24:08Z", "info_traffic": "10-20Gbps", "info_unicast": true, "policy_ratio": false, "route_server": "", "looking_glass": "http://as5388.net/cgi-bin/lg.pl", "info_multicast": false, "info_prefixes4": 30, "info_prefixes6": 2, "netfac_updated": "2016-03-14T21:24:34Z", "policy_general": "Restrictive", "allow_ixp_update": false, "netixlan_updated": null, "policy_contracts": "Not Required", "policy_locations": "Not Required", "info_never_via_route_servers": false} | 2004-08-03 10:30:54+00 | 2016-03-14 20:23:33+00 | +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select id, org_id, status, data::jsonb ->> 'asn' as asn, data::jsonb ->> 'name' as name, data::jsonb ->> 'website' as website from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | org_id | status | asn | name | website +----+--------+--------+------+-------------------+---------------------- + 83 | 3152 | ok | 5388 | Cable&Wireless UK | http://www.cw.com/uk +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select count(data::jsonb ->> 'asn') from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 23095 +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.net limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + { + + "id": 83, + + "aka": "", + + "asn": 5388, + + "name": "Cable&Wireless UK", + + "notes": "This is former Energis Communications UK backbone network (AS5388) which is now owned by Cable and Wireless.\r\n\r\nAS5388 have no direct peering relations any longer, for peering request please contact our backbone AS1273 peering team.\r\n\r\nCable and Wireless global backbone network (AS1273) has a separate PeeringDB entry.\r\n",+ + "org_id": 3152, + + "status": "ok", + + "created": "2004-08-03T10:30:54Z", + + "updated": "2016-03-14T20:23:33Z", + + "website": "http://www.cw.com/uk", + + "info_ipv6": false, + + "info_type": "NSP", + + "name_long": "", + + "info_ratio": "Balanced", + + "info_scope": "Regional", + + "irr_as_set": "AS-ENERGIS", + + "policy_url": "", + + "poc_updated": "2020-01-22T04:24:08Z", + + "info_traffic": "10-20Gbps", + + "info_unicast": true, + + "policy_ratio": false, + + "route_server": "", + + "looking_glass": "http://as5388.net/cgi-bin/lg.pl", + + "info_multicast": false, + + "info_prefixes4": 30, + + "info_prefixes6": 2, + + "netfac_updated": "2016-03-14T21:24:34Z", + + "policy_general": "Restrictive", + + "allow_ixp_update": false, + + "netixlan_updated": null, + + "policy_contracts": "Not Required", + + "policy_locations": "Not Required", + + "info_never_via_route_servers": false + + } + { + + "id": 24, + + "aka": "Extreme Telecom", + + "asn": 19817, + + "name": "DSLExtreme", + + "notes": "", + + "org_id": 62, + + "status": "ok", + + "created": "2004-07-28T00:00:00Z", + + "updated": "2016-03-14T20:47:30Z", + + "website": "http://www.dslextreme.com", + + "info_ipv6": false, + + "info_type": 
"Cable/DSL/ISP", + + "name_long": "", + + "info_ratio": "Mostly Inbound", + + "info_scope": "Regional", + + "irr_as_set": "", + + "policy_url": "", + + "poc_updated": "2016-03-14T21:35:12Z", + + "info_traffic": "1-5Gbps", + + "info_unicast": true, + + "policy_ratio": false, + + "route_server": "", + + "looking_glass": "", + + "info_multicast": false, + + "info_prefixes4": 69, + + "info_prefixes6": 3, + + "netfac_updated": "2016-03-14T20:33:54Z", + + "policy_general": "Open", + + "allow_ixp_update": false, + + "netixlan_updated": "2021-05-12T00:13:00.764215Z", + + "policy_contracts": "Not Required", + + "policy_locations": "Not Required", + + "info_never_via_route_servers": false + + } +(2 rows) + +#+end_SRC + +*** peeringdb.poc +#+BEGIN_SRC sql-mode +select * from peeringdb.poc limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | net_id | status | data | created | updated | deleted +-----+--------+--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 100 | 115 | ok | {"id": 100, "url": "", "name": "Telefonica DE Peering Team", "role": "Policy", "email": "peering.de@telefonica.com", "phone": "", "net_id": 115, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-05-20T13:55:47Z", "visible": "Public"} | 2010-07-29 00:00:00+00 | 2016-05-20 13:55:47+00 | +(1 row) + +#+end_SRC + + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.poc limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + { + + "id": 100, + + "url": "", + + "name": "Telefonica DE Peering Team",+ + "role": "Policy", + + "email": "peering.de@telefonica.com",+ + "phone": "", + + "net_id": 115, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2016-05-20T13:55:47Z", + + 
"visible": "Public" + + } + { + + "id": 48, + + "url": "", + + "name": "NOC", + + "role": "NOC", + + "email": "noc@stealth.net", + + "phone": "+12122322020", + + "net_id": 26, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2020-05-20T23:14:22Z", + + "visible": "Public" + + } + +#+end_SRC + +** Building asn-ip list with Postgres (this requires import of a asn list) +#+BEGIN_SRC sql-mode +create schema asntocompany; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +CREATE SCHEMA +#+end_SRC +Split asn from dump so we have table with ony asns, will name that one `asnproc` +#+BEGIN_SRC tmate :window autonums :dir (concat (getenv "HOME") "/autonums") +cat /home/ii/autonums/asn_company_results.csv | cut -d ',' -f1 | sed 's/"//' | sed 's/"//'| cut -d 'S' -f2 >> asns_only.txt +#+END_SRC + +#+BEGIN_SRC sql-mode +-- create table asnproc ( +-- asn bigint not null primary key +-- ); +\copy asnproc from '/home/ii/autonums/asns_only.txt'; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +#+end_SRC +** Trying a few queries to see what I see +#+BEGIN_SRC sql-mode +select (net.data ->> 'name') as "name", + asn + from peeringdb.net + where (net.data ->> 'name') ilike '%google%' + limit 5; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + name | asn +--------------------+------- + Google LLC | 15169 + Google LLC AS19527 | 19527 + Google LLC AS36040 | 36040 + Google LLC AS43515 | 43515 + Google Fiber, Inc. 
| 16591 +(5 rows) + +#+end_SRC + +We only have 23k asns +#+BEGIN_SRC sql-mode +select count(*) + from peeringdb.net + where (net.data ->> 'asn') is not null; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 23097 +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select count(*) +from peeringdb.poc p +where (p.data ->> 'email') is not null; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 10756 +(1 row) + +#+end_SRC + + +#+BEGIN_SRC sql-mode +select + (poc.data ->> 'name') as poc_name +from peeringdb.poc poc +-- left join peeringdb.poc poc on ((net.data ->>'name') = (poc.data ->>'name')) +where (poc.data ->> 'name') ilike '%google%' +or (poc.data ->> 'name') ilike '%amazon%' +or (poc.data ->> 'name') ilike '%microsoft%'; +-- where (net.data ->>'name') ilike '%google%'; +-- select data from peeringdb.net where (data ->> 'asn')::bigint = 21789 limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + poc_name +----------------------------------- + noc@google.com + Google Fiber NOC + Diretoria de Tecnologia Amazontel + Diretoria de Tecnologia Amazontel + Ganesh Wakode Google Mail +(5 rows) + +#+end_SRC + +#+BEGIN_SRC sql-mode +begin; +-- create table asnproc ( +-- asn bigint not null primary key +-- ); +-- \copy asnproc from '/home/ii/peeringdb-simplesync/asns.txt'; +select count(*) from peeringdb.poc; +select net.id, + asnproc.asn, + (net.data ->> 'name') as "name", + (net.data ->> 'website') as "website" + -- (poc.data ->> 'email') as email + from asnproc + join peeringdb.net net on ((net.data ->> 'asn')::bigint = asnproc.asn) + -- left join peeringdb.poc poc on ((poc.data ->> 'name') = 'chonkers') + -- left join peeringdb.poc poc on ((poc.data ->> 'name') = (net.data ->> 'name')) + -- where (net.data ->>'website') is not null + -- order by email asc + limit 5; +rollback; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +BEGIN + count +------- + 34255 +(1 row) + +#+end_SRC + +* Combine data in shadowserver en pyasn datasets +I already have 
the entire shadowserver table in bq, you can find the method that was used to generate it in https://github.com/ii/org/blob/main/research/shadowserver_asn.org +I also have the entire pyasn lookup in bq method for loading it can be found on https://github.com/ii/org/blob/main/research/pyasn-lookup.org + +Lets get some basic queries working +#+begin_src tmate :window bq_results +bq query --nouse_legacy_sql --destination_table k8s_artifacts_dataset_bb_test.tmp_testing 'select * from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_potaroo_int` limit 20' +#+end_src +Confirming I can replace a table +#+begin_src tmate :window bq_results +bq query --nouse_legacy_sql --destination_table k8s_artifacts_dataset_bb_test.tmp_testing --replace 'select * from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_potaroo_int` limit 10' +#+end_src +Testing appending +#+begin_src tmate :window bq_results +bq query --nouse_legacy_sql --destination_table k8s_artifacts_dataset_bb_test.tmp_testing --append_table 'select * from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_potaroo_int` limit 10' +#+end_src + +** Get asns with all distinct start times +#+begin_src shell +bq query --nouse_legacy_sql 'select count(*) from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_int`' +#+end_src + +#+RESULTS: +#+begin_example ++--------+ +| f0_ | ++--------+ +| 518099 | ++--------+ +#+end_example + +#+begin_src shell +bq query --nouse_legacy_sql 'select count(*) from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.pyasn_ip_asn_extended_int`' +#+end_src + +#+RESULTS: +#+begin_example ++--------+ +| f0_ | ++--------+ +| 923058 | ++--------+ +#+end_example + +This join is wonky, not fully understanding what I want to do. 
+#+begin_src tmate :window bq_results +bq query --nouse_legacy_sql 'select pyasn.asn, pyasn.cidr_ip, pyasn.start_ip, pyasn.end_ip, pyasn.start_ip_int, pyasn.end_ip_1 from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_int` as shadow inner join `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.pyasn_ip_asn_extended_int` as pyasn on pyasn.asn=shadow.asn limit 10' +#+end_src + +Lazy JOIN! I think this should get us what we want. +Nah this got us what is not between them +#+begin_src tmate :window bq_results +bq query --nouse_legacy_sql --destination_table k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded 'select * from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.pyasn_ip_asn_extended_int` where start_ip_int not in (select start_ip_int from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_int`)' +#+end_src +Lets try with except +same result +#+begin_src tmate :window bq_results +bq query --nouse_legacy_sql --destination_table k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded 'select * from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.pyasn_ip_asn_extended_int` except distinct select * from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_int`' +#+end_src + +This produced a table with what looks like a very good combination of everything in a and content of b that is not in a +#+begin_src tmate :window bq_results +bq query --nouse_legacy_sql --destination_table k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded 'select * from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.pyasn_ip_asn_extended_int` UNION DISTINCT (select * from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_int`)' +#+end_src + + +* org tools to process json, keep this in mind for this project +** Post process org blocks +Just making sure we can get to json +#+NAME: json-res +#+BEGIN_SRC sql-mode :var json-r="" +select data from peeringdb.ixlan limit 1; +#+END_SRC + 
+#+RESULTS: json-res +#+begin_SRC example + {"id": 41, "mtu": null, "name": "", "descr": "", "ix_id": 41, "rs_asn": 0, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-11T07:21:58Z", "arp_sponge": null, "dot1q_support": false, "ixf_ixp_member_list_url_visible": "Private"} + +#+end_SRC + +Dang it I am missing something here.... +#+BEGIN_SRC shell :process_r yes :post json-res[:process_r yes](*this*) +jq '.' +#+END_SRC + +#+RESULTS: +#+begin_example + {"id": 41, "mtu": null, "name": "", "descr": "", "ix_id": 41, "rs_asn": 0, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-11T07:21:58Z", "arp_sponge": null, "dot1q_support": false, "ixf_ixp_member_list_url_visible": "Private"} +#+end_example + +** Wrap header for json. +#+BEGIN_SRC sql-mode :results sql :wrap EXPORT json +select data from peeringdb.ixlan limit 1; +#+END_SRC + +#+RESULTS: +#+begin_EXPORT json + {"id": 41, "mtu": null, "name": "", "descr": "", "ix_id": 41, "rs_asn": 0, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-11T07:21:58Z", "arp_sponge": null, "dot1q_support": false, "ixf_ixp_member_list_url_visible": "Private"} + +#+end_EXPORT diff --git a/research/asn-data-pipeline/ip-from-pyasn.py b/research/asn-data-pipeline/ip-from-pyasn.py new file mode 100644 index 0000000..284ab62 --- /dev/null +++ b/research/asn-data-pipeline/ip-from-pyasn.py @@ -0,0 +1,40 @@ +## Import pyasn and csv +import pyasn +import csv +import sys + +## Set file path +asnFile = sys.argv[1] +asnDat = sys.argv[2] +pyAsnOutput = sys.argv[3] +## Open asnNumFile and read +asnNum = [line.rstrip() for line in open(asnFile, "r+")] + +## assign our dat file connection string +asndb = pyasn.pyasn(asnDat) +## Declare empty dictionary +destDict = {} +singleAsn = "" + +## Loop through list of asns +for singleAsn in asnNum: + ## Go look up the asn subnets (prefixes) + subnets = asndb.get_as_prefixes(singleAsn) + ## Add checking to make sure we have subnets + ## TODO: insert asn 
with no routes so we know which failed without having to do a lookup + if not subnets: + print("This ASN has no subnets", singleAsn) + else: + ## Add subnets to our dictionaries with + originAsnDict = {sbnets : singleAsn for sbnets in subnets} + ## This is what lets us append each loop to the final destDict + destDict.update(originAsnDict) + +## Open handle to output file +resultsCsv = open(pyAsnOutput, "w") +# write to csv +writer = csv.writer(resultsCsv) +for key, value in destDict.items(): + writer.writerow([key, value]) + +## winner winner chicken dinner diff --git a/research/asn-data-pipeline/main_etl_processor.sh b/research/asn-data-pipeline/main_etl_processor.sh new file mode 100755 index 0000000..479881a --- /dev/null +++ b/research/asn-data-pipeline/main_etl_processor.sh @@ -0,0 +1,143 @@ +# main shell + +#!/bin/bash -x + +gcloud auth activate-service-account "${GCP_SERVICEACCOUNT}" --key-file="${GOOGLE_APPLICATION_CREDENTIALS}" +## GET ASN_COMPANY section +## using https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_company_table.org +## This will pull a fresh copy, I prefer to use what we have in gs +# curl -s https://bgp.potaroo.net/cidr/autnums.html | sed -nre '/AS[0-9]/s/.*as=([^&]+)&.*">([^<]+)<\/a> ([^,]+), (.*)/"\1", "\3", "\4"/p' | head +# TODO: add if statement to do manual parsing if the gs file is not there +gsutil cp gs://ii_bq_scratch_dump/potaroo_company_asn.csv /tmp/potaroo_company_asn.csv + +## I want to import the above csv into pg +## Blocked by pg container +## placeholder sql +create table company_asn (asn varchar, name varchar); +COPY company_asn from '/tmp/potaroo_company_asn.csv' DELIMITER ',' CSV; + + +## GET PYASN section +## using https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_vendor_table.org + +## pyasn installs its utils in ~/.local/bin/* +## Add pyasn utils to path (dockerfile?) 
+export PATH="/home/ii/.local/bin/:$PATH" +## full list of RIB files on ftp://archive.routeviews.org//bgpdata/2021.05/RIBS/ +cd /tmp +pyasn_util_download.py --latest +## Convert rib file to .dat we can process +pyasn_util_convert.py --single rib.latest.bz2 ipasn_latest.dat +## Run the py script we are including in the docker image +python ./ii-pyasn.py + +## Load csv into pg +## placeholder sql +create table pyasn_ip_asn (ip cidr, asn int); +\COPY pyasn_ip_asn from '/home/ii/foo/pyAsnOutput.csv' DELIMITER ',' CSV; +## Split subnet into start and end + select asn as asn, + ip as ip, + host(network(ip)::inet) as ip_start, + host(broadcast(ip)::inet) as ip_end + into table pyasn_ip_asn_extended + from pyasn_ip_asn; + + ## Copy the results to csv + \copy (select * from pyasn_ip_asn_extended) to '/tmp/pyasn_expanded_ipv4.csv' csv header; + ## Load csv to bq + bq load --autodetect k8s_artifacts_dataset_bb_test.pyasn_ip_asn_extended /tmp/pyasn_expanded_ipv4.csv + ## Lets go convert the beginning and end into ints + bq query --nouse_legacy_sql \ + ' + SELECT + asn as asn, + ip as cidr_ip, + ip_start as start_ip, + ip_end as end_ip, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_start)) AS start_ip_int, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_end)) AS end_ip_int + from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.pyasn_ip_asn_extended` + WHERE regexp_contains(ip_start, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"); + ' + +## This should be the end of pyasn section, we have results table that covers start_ip/end_ip from fs our requirements +## GET k8s asn yaml using: +## https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn_k8s_yaml.org +## Lets create csv's to import +## TODO: refactor this to loop that can generate these in a couple of passes +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/microsoft.yaml | yq e . 
-j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/microsoft_yaml.csv +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/google.yaml | yq e . -j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/google_yaml.csv +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/amazon.yaml | yq e . -j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/amazon_yaml.csv +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/alibabagroup.yaml | yq e . -j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/alibabagroup_yaml.csv +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/baidu.yaml | yq e . -j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/baidu_yaml.csv +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/digitalocean.yaml | yq e . 
-j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/digitalocean_yaml.csv +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/equinixmetal.yaml | yq e . -j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/equinixmetal_yaml.csv +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/huawei.yaml | yq e . -j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/huawei_yaml.csv +curl -s https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/tencentcloud.yaml | yq e . -j - \ +| jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [.,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' > /tmp/tencentcloud_yaml.csv + +## Load all the csv +## TODO: Make this into a loop. 
+## TODO: Set a final destination table +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/microsoft_yaml.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/google_yaml.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/amazon_yaml.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/alibabagroup_yaml.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/baidu_yaml.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/digitalocean_yaml.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/equinixmetal_yaml.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/huawei_yaml.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.k8s_repo_json /tmp/tencentcloud_yaml.csv + +## GET Vendor YAML +## https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn_k8s_yaml.org +curl 'https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_20210607.json' | jq -r \ +'.values[] | .properties.platform as $service | .properties.region as $region | .properties.addressPrefixes[] | [., $service, $region] | @csv' > /tmp/microsoft_subnet_region.csv +curl 'https://www.gstatic.com/ipranges/cloud.json' | jq -r '.prefixes[] | [.ipv4Prefix, .service, .scope] | @csv' > /tmp/google_raw_subnet_region.csv +curl 'https://ip-ranges.amazonaws.com/ip-ranges.json' | jq -r '.prefixes[] | [.ip_prefix, .service, .region] | @csv' > /tmp/amazon_raw_subnet_region.csv + +## Load all the csv +## TODO: Make this into a loop. 
+## TODO: Set a final destination table +bq load --autodetect k8s_artifacts_dataset_bb_test.amazon_raw_subnet_region /tmp/amazon_raw_subnet_region.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.google_raw_subnet_region /tmp/google_raw_subnet_region.csv +bq load --autodetect k8s_artifacts_dataset_bb_test.microsoft_raw_subnet_region /tmp/microsoft_subnet_region.csv + +## GET Metadata from peeringdb +## https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_metadata_table.org +## In docker file section above, make sure credentials is set, psycopg2 is installed +## Import the schema from the repo +psql -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP < schema.sql +## Run the sync to populate the database +python3 ./sync.py +## Lets get a table with asns only +cat /home/ii/potaroo_company_asn.csv | cut -d ',' -f1 | sed 's/"//' | sed 's/"//'| cut -d 'S' -f2 >> asns_only.txt +## placeholder for sql we will need to import asn_only from + create table asnproc ( + asn bigint not null primary key + ); +\copy asnproc from '/home/ii/autonums/asns_only.txt'; +## Placeholder sql for joining peeringdb to produce output with email, website + \copy ( select distinct asn.asn, + (net.data ->> 'name') as "name", + (net.data ->> 'website') as "website", + (poc.data ->> 'email') as email + into asn_name_web_email + from asnproc asn + left join peeringdb.net net on (net.asn = asn.asn) + left join peeringdb.poc poc on ((poc.data ->> 'name') = (net.data ->> 'name')) + -- where (net.data ->>'website') is not null + -- where (poc.data ->> 'email') is not null + order by email asc) to '/tmp/peeringdb_metadata.csv' csv header;; + +## Load output to bq +bq load --autodetect k8s_artifacts_dataset_bb_test.amazon_raw_subnet_region /tmp/amazon_raw_subnet_region.csv diff --git a/research/asn-data-pipeline/match-ip-to-ip-range.org b/research/asn-data-pipeline/match-ip-to-ip-range.org new file mode 100644 index 0000000..16a5464 --- /dev/null +++ 
b/research/asn-data-pipeline/match-ip-to-ip-range.org @@ -0,0 +1,494 @@ +#+TITLE: Match IP to IP range + +* Bringing up Postgres +** Secrets +#+name: postgres-secret +#+begin_src yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres-configuration +stringData: + POSTGRES_DB: postgres + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + PGDATABASE: postgres + PGUSER: postgres +#+end_src +** Deployment +#+name: postgres-deployment +#+begin_src yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-pvc + labels: + app: postgres +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres + labels: + app: postgres +spec: + replicas: 1 + serviceName: "postgres" + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + restartPolicy: Always + containers: + - name: postgres + image: docker.io/postgres:12.2-alpine + securityContext: + readOnlyRootFilesystem: true + runAsUser: 70 + runAsGroup: 70 + allowPrivilegeEscalation: false + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + - name: var-run-postgresql + mountPath: /var/run/postgresql + - name: tmp + mountPath: /tmp + ports: + - containerPort: 5432 + livenessProbe: + exec: + command: + - "sh" + - "-c" + - "pg_isready" + - "-U" + - "$POSTGRES_USER" + failureThreshold: 5 + periodSeconds: 10 + timeoutSeconds: 5 + env: + - name: POSTGRES_DB + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_DB + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_PASSWORD + - name: PGDATABASE + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGDATABASE + - name: PGUSER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGUSER + 
- name: PGDATA + value: /var/lib/postgresql/data/pgdata + initContainers: + - name: postgres-db-permissions-fix + image: alpine:3.12 + command: + - /bin/sh + - -c + - "/bin/chown -R 70:70 /var/lib/postgresql/data" + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + volumes: + - name: var-lib-postgresql + persistentVolumeClaim: + claimName: postgres-pvc + - name: var-run-postgresql + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - name: "5432" + port: 5432 + targetPort: 5432 +#+end_src +** Deploying +#+begin_src shell :noweb yes +kubectl apply -f - << EOF +<> +--- +<> +EOF +#+end_src + +#+RESULTS: +#+begin_example +secret/postgres-configuration created +persistentvolumeclaim/postgres-pvc created +statefulset.apps/postgres created +service/postgres created +#+end_example + +* Setting local vars +Use /C-c C-v s/ to execute the following blocks +** Defaults for sql-mode blocks +#+begin_src elisp :results silent +(set (make-local-variable 'org-babel-default-header-args:sql-mode) + ;; Set up all sql-mode blocks to be postgres and literate + '((:results . "replace code") + (:product . "postgres") + (:session . "none") + (:noweb . "yes") + (:comments . "no") + (:wrap . 
"SRC example"))) +#+end_src + +** Default for connecting to sql-mode +#+begin_src elisp :results silent +(set (make-local-variable 'sql-server) "postgres") +(set (make-local-variable 'sql-port) 5432) +(set (make-local-variable 'sql-user) "postgres") +(set (make-local-variable 'sql-database) "postgres") +(set (make-local-variable 'sql-product) '(quote postgres)) +#+end_src + +** Default for creating new sql-mode connections +#+begin_src elisp :results silent +(set (make-local-variable 'sql-connection-alist) + (list + ;; setting these allows for the connection to be + ;; created on the fly + (list 'none + (list 'sql-product '(quote postgres)) + (list 'sql-user sql-user) + (list 'sql-database sql-database) + (list 'sql-port sql-port) + (list 'sql-server sql-server)))) +#+end_src + + +* Connecting to Postgres +Connection string: =postgres://postgres:password@postgres/postgres= + +* Sign in to Google auth +Login to gcloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC + +Set the project +#+BEGIN_SRC tmate :window prepare +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC + +Login as application +#+begin_src tmate :window prepare +gcloud auth application-default login +#+end_src + +* Prepare IP dataset +** usage_all_ip_only_distinct_int +Copy data to a CSV file in a bucket +#+begin_src shell :prologue "( " :epilogue " ) 2>&1 ; :" +TIMESTAMP=$(date +%Y%m%d%H%M) +echo $TIMESTAMP > /tmp/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-timestamp.txt + +bq extract \ + --destination_format CSV \ + k8s-infra-ii-sandbox:k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int \ + gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-$TIMESTAMP-*.csv +#+end_src + +#+RESULTS: +#+begin_example + +Welcome to BigQuery! This script will walk you through the +process of initializing your .bigqueryrc configuration file. + +First, we need to set up your credentials if they do not +already exist. 
+ +Credential creation complete. Now we will select a default project. + +List of projects: + # projectId friendlyName + --- ------------------------------ ------------------------------ + 1 apisnoop apisnoop + 2 k8s-artifacts-prod k8s-artifacts-prod + 3 k8s-cip-test-prod k8s-cip-test-prod + 4 k8s-infra-e2e-scale-project k8s-infra-e2e-scale-project + 5 k8s-infra-ii-sandbox k8s-infra-ii-sandbox + 6 k8s-infra-prow-build k8s-infra-prow-build + 7 k8s-infra-prow-build-trusted k8s-infra-prow-build-trusted + 8 k8s-infra-public-pii k8s-infra-public-pii + 9 kubernetes-public kubernetes-public +Found multiple projects. Please enter a selection for +which should be the default, or leave blank to not +set a default. + +Enter a selection (1 - 9): Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (0s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (1s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (2s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (3s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (4s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (5s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (6s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (7s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (7s) Current status: DONE + +Got EOF; exiting. Is your input from a terminal? +#+end_example + +List csv files +#+begin_src shell +TIMESTAMP=$(cat /tmp/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-timestamp.txt | tr -d '\n') +gsutil ls gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-$TIMESTAMP-*.csv | head +echo "..." 
+printf "Total: " +gsutil ls gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-$TIMESTAMP-*.csv | wc -l +#+end_src + +#+RESULTS: +#+begin_example +gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-202106141504-000000000000.csv +... +Total: 1 +#+end_example + +Download data +#+begin_src tmate :window prepare +TIMESTAMP=$(cat /tmp/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-timestamp.txt | tr -d '\n') +mkdir -p /tmp/usage_all_ip_only/ +gsutil cp \ + gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-$TIMESTAMP-*.csv \ + /tmp/usage_all_ip_only/ +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +Merge the data +#+begin_src tmate :window prepare +cat /tmp/usage_all_ip_only/*.csv | tail +2 > /tmp/usage_all_ip_only.csv +#+end_src + +** shadow_pyasn_expanded +Copy data to a CSV file in a bucket +#+begin_src shell :prologue "( " :epilogue " ) 2>&1 ; :" +TIMESTAMP=$(date +%Y%m%d%H%M) +echo $TIMESTAMP > /tmp/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-timestamp.txt + +bq extract \ + --destination_format CSV \ + k8s-infra-ii-sandbox:k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded \ + gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-$TIMESTAMP-*.csv +#+end_src + +#+RESULTS: +#+begin_example + Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (0s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (1s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (2s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (3s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (4s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (5s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... 
(6s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (8s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (8s) Current status: DONE +#+end_example + +List csv files +#+begin_src shell +TIMESTAMP=$(cat /tmp/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-timestamp.txt | tr -d '\n') +gsutil ls gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-$TIMESTAMP-*.csv | head +echo "..." +printf "Total: " +gsutil ls gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-$TIMESTAMP-*.csv | wc -l +#+end_src + +#+RESULTS: +#+begin_example +gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-202106141509-000000000000.csv +... +Total: 1 +#+end_example + +Download data +#+begin_src tmate :window prepare +TIMESTAMP=$(cat /tmp/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-timestamp.txt | tr -d '\n') +mkdir -p /tmp/shadow_pyasn_expanded/ +gsutil cp \ + gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-$TIMESTAMP-*.csv \ + /tmp/shadow_pyasn_expanded/ +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +Merge the data +#+begin_src tmate :window prepare +cat /tmp/shadow_pyasn_expanded/*.csv | tail +2 > /tmp/shadow_pyasn_expanded.csv +#+end_src + +** shadow_pyasn_expanded +k8s-infra-ii-sandbox:k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded + +* Create schema in Postgres +#+begin_src sql-mode +create table if not exists cust_ip ( + c_ip bigint not null +); +#+end_src + +#+RESULTS: +#+begin_SRC example +CREATE TABLE +#+end_SRC + + +#+begin_src sql-mode +create table if not exists shadow_pyasn_expanded ( + asn text, + cidr_ip cidr, + start_ip inet, + end_i inet, + start_ip_net bigint, + end_ip_1 bigint +); +#+end_src + +#+RESULTS: +#+begin_SRC example +CREATE TABLE +#+end_SRC + + + +* Insert data +#+begin_src tmate :window prepare +export PGUSER=ii; 
PGPASSWORD=DxSO4S1aUQG3dHoG8AXogt0rbm2PGc6HsVAVtSKnbsJF5bwi0CTKamGBULq6rhnu; +psql -U ii -d ii -h ii-ii-pooler.ii-db.svc.cluster.local -c "\\copy cust_ip from '/tmp/usage_all_ip_only.csv';" +#+end_src + + +#+begin_src tmate :window prepare +export PGUSER=ii; export PGPASSWORD=DxSO4S1aUQG3dHoG8AXogt0rbm2PGc6HsVAVtSKnbsJF5bwi0CTKamGBULq6rhnu; +psql -U ii -d ii -h ii-ii-pooler.ii-db.svc.cluster.local -c "\\copy shadow_pyasn_expanded from '/tmp/shadow_pyasn_expanded.csv' (DELIMITER(','));" +#+end_src + +* Discover the data +#+begin_src sql-mode +select count(*) from cust_ip; +#+end_src + +#+RESULTS: +#+begin_SRC example + count +--------- + 7417599 +(1 row) + +#+end_SRC + + +#+begin_src sql-mode +select count(*) from shadow_pyasn_expanded; +#+end_src + +#+RESULTS: +#+begin_SRC example + count +-------- + 927411 +(1 row) + +#+end_SRC + +* Add indexes to the tables + +#+begin_src sql-mode +create index on shadow_pyasn_expanded (end_ip_1); +#+end_src +#+begin_src sql-mode +create index on shadow_pyasn_expanded (start_ip_net); +#+end_src +#+begin_src sql-mode +create index on cust_ip (c_ip); +#+end_src + +* Join the data +#+begin_src sql-mode +select 1,2,3; +#+end_src + +#+RESULTS: +#+begin_SRC example + ?column? | ?column? | ?column? 
+----------+----------+---------- + 1 | 2 | 3 +(1 row) + +#+end_SRC + +#+begin_src sql-mode +SELECT +shadow_pyasn_expanded.cidr_ip, +shadow_pyasn_expanded.start_ip_net, +shadow_pyasn_expanded.end_ip_1, +shadow_pyasn_expanded.asn, +cust_ip.c_ip +FROM +shadow_pyasn_expanded, +cust_ip +WHERE +cust_ip.c_ip >= shadow_pyasn_expanded.start_ip_net +AND cust_ip.c_ip <= shadow_pyasn_expanded.end_ip_1 +LIMIT 10 +; +#+end_src + +#+begin_src sql-mode +\copy ( + SELECT + shadow_pyasn_expanded.cidr_ip, + shadow_pyasn_expanded.start_ip_net, + shadow_pyasn_expanded.end_ip_1, + shadow_pyasn_expanded.asn, + cust_ip.c_ip +FROM + shadow_pyasn_expanded, + cust_ip +WHERE + cust_ip.c_ip >= shadow_pyasn_expanded.start_ip_net +AND cust_ip.c_ip <= shadow_pyasn_expanded.end_ip_1 +) +TO + '/tmp/match-ip-to-iprange.csv' +CSV +HEADER +; +#+end_src +#+begin_src sql-mode +\copy ( SELECT shadow_pyasn_expanded.cidr_ip, shadow_pyasn_expanded.start_ip_net, shadow_pyasn_expanded.end_ip_1, shadow_pyasn_expanded.asn, cust_ip.c_ip FROM shadow_pyasn_expanded, cust_ip WHERE cust_ip.c_ip >= shadow_pyasn_expanded.start_ip_net AND cust_ip.c_ip <= shadow_pyasn_expanded.end_ip_1) TO '/tmp/match-ip-to-iprange.csv' CSV HEADER; +#+end_src + +#+RESULTS: +#+begin_SRC example +#+end_SRC + +I ended up adding indexes and that got it to complete +I have to come back to add the missing steps I did to get a successful csv' +* Upload results to bq +#+begin_src tmate :window prepare +bq load --autodetect k8s_artifacts_dataset_bb_test.match_ip_range_to_asn /tmp/match-ip-to-iprange.csv +#+end_src diff --git a/research/asn-data-pipeline/pg-init.d/00-get-dependencies.sh b/research/asn-data-pipeline/pg-init.d/00-get-dependencies.sh new file mode 100644 index 0000000..f4ab02c --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/00-get-dependencies.sh @@ -0,0 +1,68 @@ +#!/bin/bash +set -x + +set -eo pipefail +eval "${ASN_DATA_PIPELINE_PREINIT:-}" + +PARENTPID=$(ps -o ppid= -p $$) +echo MY PID :: $$ +echo PARENT PID :: 
$PARENTPID +ps aux + +cat << EOF > $HOME/.bigqueryrc +credential_file = ${GOOGLE_APPLICATION_CREDENTIALS} +project_id = ${GCP_PROJECT} +EOF + +gcloud config set project "${GCP_PROJECT}" + +## This is just to continue testing while I wait for permissions for the service account +## Use the activate-service-account live once it has permissions +## The container is being run, so it should let me manually do the auth +# gcloud auth login +gcloud auth activate-service-account "${GCP_SERVICEACCOUNT}" --key-file="${GOOGLE_APPLICATION_CREDENTIALS}" + +gcloud auth list + +## GET ASN_COMPANY section +## using https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_company_table.org +## This will pull a fresh copy, I prefer to use what we have in gs +# curl -s https://bgp.potaroo.net/cidr/autnums.html | sed -nre '/AS[0-9]/s/.*as=([^&]+)&.*">([^<]+)<\/a> ([^,]+), (.*)/"\1", "\3", "\4"/p' | head + +bq ls +# Remove the previous data set +bq rm -r -f "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" || true + +# initialise a new data set with the given name +bq mk \ + --dataset \ + --description "etl pipeline dataset for ASN data from CNCF supporting vendors of k8s infrastructure" \ + "${GCP_PROJECT}:${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" + +if [ ! -f "/tmp/potaroo_data.csv" ]; then + gsutil cp gs://ii_bq_scratch_dump/potaroo_company_asn.csv /tmp/potaroo_data.csv +fi + +# Strip data to only return ASN numbers +cat /tmp/potaroo_data.csv | cut -d ',' -f1 | sed 's/"//' | sed 's/"//'| cut -d 'S' -f2 | tail +2 > /tmp/potaroo_asn.txt + +cat /tmp/potaroo_data.csv | tail +2 | sed 's,^AS,,g' > /tmp/potaroo_asn_companyname.csv + +## GET PYASN section +## using https://github.com/ii/org/blob/main/research/asn-data-pipeline/etl_asn_vendor_table.org + +## pyasn installs its utils in ~/.local/bin/* +## Add pyasn utils to path (dockerfile?) +## full list of RIB files on ftp://archive.routeviews.org//bgpdata/2021.05/RIBS/ +cd /tmp +if [ !
-f "rib.latest.bz2" ]; then + pyasn_util_download.py --latest + mv rib.*.*.bz2 rib.latest.bz2 +fi +## Convert rib file to .dat we can process +if [ ! -f "ipasn_latest.dat" ]; then + pyasn_util_convert.py --single rib.latest.bz2 ipasn_latest.dat +fi +## Run the py script we are including in the docker image +python3 /app/ip-from-pyasn.py /tmp/potaroo_asn.txt ipasn_latest.dat /tmp/pyAsnOutput.csv +## This will output pyasnOutput.csv diff --git a/research/asn-data-pipeline/pg-init.d/01-migrate-schemas.sql b/research/asn-data-pipeline/pg-init.d/01-migrate-schemas.sql new file mode 100644 index 0000000..a3b3a8a --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/01-migrate-schemas.sql @@ -0,0 +1,37 @@ +begin; + +create table if not exists cust_ip ( + c_ip bigint not null +); + +create table if not exists vendor_expanded_int ( + asn text, + cidr_ip cidr, + start_ip inet, + end_ip inet, + start_ip_int bigint, + end_ip_int bigint, + name_with_yaml_name varchar +); + +create table company_asn ( + asn varchar, + name varchar +); +create table pyasn_ip_asn ( + ip cidr, + asn int +); +create table asnproc ( + asn bigint not null primary key +); + +create table peeriingdbnet ( + data jsonb +); + +create table peeriingdbpoc ( + data jsonb +); + +commit; diff --git a/research/asn-data-pipeline/pg-init.d/02-load-pyasn-output.sql b/research/asn-data-pipeline/pg-init.d/02-load-pyasn-output.sql new file mode 100644 index 0000000..da489de --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/02-load-pyasn-output.sql @@ -0,0 +1,15 @@ +copy company_asn from '/tmp/potaroo_data.csv' delimiter ',' csv; +copy pyasn_ip_asn from '/tmp/pyAsnOutput.csv' delimiter ',' csv; + +-- Split subnet into start and end +select + asn as asn, + ip as ip, + host(network(ip)::inet) as ip_start, + host(broadcast(ip)::inet) as ip_end +into + table pyasn_ip_asn_extended +from pyasn_ip_asn; + +-- Copy the results to cs +copy (select * from pyasn_ip_asn_extended) to '/tmp/pyasn_expanded_ipv4.csv' csv 
header; diff --git a/research/asn-data-pipeline/pg-init.d/03-load-into-a-bigquery-dataset.sh b/research/asn-data-pipeline/pg-init.d/03-load-into-a-bigquery-dataset.sh new file mode 100644 index 0000000..b73e190 --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/03-load-into-a-bigquery-dataset.sh @@ -0,0 +1,73 @@ +## Load csv to bq +bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).potaroo_all_asn_name" /tmp/potaroo_asn_companyname.csv asn:integer,companyname:string + +## Load a copy of the potaroo_data to bq +# https://github.com/ii/org/blob/main/research/asn-data-pipeline/match-ip-to-ip-range.org +bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).pyasn_ip_asn_extended" /tmp/pyasn_expanded_ipv4.csv asn:integer,ip:string,ip_start:string,ip_end:string + +## Lets go convert the beginning and end into ints +export GCP_BIGQUERY_DATASET_WITH_DATE="${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" +envsubst < /app/ext-ip-asn.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).vendor" + +mkdir -p /tmp/vendor + +VENDORS=( + microsoft + google + amazon + alibabagroup + baidu + digitalocean + equinixmetal + huawei + tencentcloud +) +## This should be the end of pyasn section, we have results table that covers start_ip/end_ip from fs our requirements +## GET k8s asn yaml using: +## https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn_k8s_yaml.org +## Lets create csv's to import +for VENDOR in ${VENDORS[*]}; do + curl -s "https://raw.githubusercontent.com/kubernetes/k8s.io/main/registry.k8s.io/infra/meta/asns/${VENDOR}.yaml" \ + | yq e . -j - \ + | jq -r '.name as $name | .redirectsTo.registry as $redirectsToRegistry | .redirectsTo.artifacts as $redirectsToArtifacts | .asns[] | [. 
,$name, $redirectsToRegistry, $redirectsToArtifacts] | @csv' \ + > "/tmp/vendor/${VENDOR}_yaml.csv" + bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).vendor_yaml" "/tmp/vendor/${VENDOR}_yaml.csv" asn_yaml:integer,name_yaml:string,redirectsToRegistry:string,redirectsToArtifacts:string +done + +ASN_VENDORS=( + amazon + google + microsoft +) + +## GET Vendor YAML +## https://github.com/ii/org/blob/main/research/asn-data-pipeline/asn_k8s_yaml.org +## TODO: Make this a loop that goes through dates to find a working URL +## curl "https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_$(date --date='-2 days' +%Y%m%d).json" \ +curl "https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_20210906.json" \ + | jq -r '.values[] | .properties.platform as $service | .properties.region as $region | .properties.addressPrefixes[] | [., $service, $region] | @csv' \ + > /tmp/vendor/microsoft_raw_subnet_region.csv +curl 'https://www.gstatic.com/ipranges/cloud.json' \ + | jq -r '.prefixes[] | [.ipv4Prefix, .service, .scope] | @csv' \ + > /tmp/vendor/google_raw_subnet_region.csv +curl 'https://ip-ranges.amazonaws.com/ip-ranges.json' \ + | jq -r '.prefixes[] | [.ip_prefix, .service, .region] | @csv' \ + > /tmp/vendor/amazon_raw_subnet_region.csv + +## Load all the csv +for VENDOR in ${ASN_VENDORS[*]}; do + bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).vendor_json" "/tmp/vendor/${VENDOR}_raw_subnet_region.csv" ipprefix:string,service:string,region:string +done + +mkdir -p /tmp/peeringdb-tables +PEERINGDB_TABLES=( + net + poc +) +for PEERINGDB_TABLE in ${PEERINGDB_TABLES[*]}; do + curl -sG "https://www.peeringdb.com/api/${PEERINGDB_TABLE}" | jq -c '.data[]' | sed 's,",\",g' > "/tmp/peeringdb-tables/${PEERINGDB_TABLE}.json" +done + +# /tmp/potaroo_asn.txt + +## placeholder for sql we will need to import asn_only from diff --git 
a/research/asn-data-pipeline/pg-init.d/04-load-asn-data.sql b/research/asn-data-pipeline/pg-init.d/04-load-asn-data.sql new file mode 100644 index 0000000..189ef61 --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/04-load-asn-data.sql @@ -0,0 +1,16 @@ +copy asnproc from '/tmp/potaroo_asn.txt'; + +copy peeriingdbnet (data) from '/tmp/peeringdb-tables/net.json' csv quote e'\x01' delimiter e'\x02'; +copy peeriingdbpoc (data) from '/tmp/peeringdb-tables/poc.json' csv quote e'\x01' delimiter e'\x02'; + +copy ( + select distinct asn.asn, + (net.data ->> 'name') as "name", + (net.data ->> 'website') as "website", + (poc.data ->> 'email') as email + from asnproc asn + left join peeriingdbnet net on (cast(net.data::jsonb ->> 'asn' as bigint) = asn.asn) + left join peeriingdbpoc poc on ((poc.data ->> 'name') = (net.data ->> 'name')) +-- where (net.data ->>'website') is not null +-- where (poc.data ->> 'email') is not null + order by email asc) to '/tmp/peeringdb_metadata_prepare.csv' csv header; diff --git a/research/asn-data-pipeline/pg-init.d/05-bq-load-metadata.sh b/research/asn-data-pipeline/pg-init.d/05-bq-load-metadata.sh new file mode 100644 index 0000000..91c6847 --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/05-bq-load-metadata.sh @@ -0,0 +1,4 @@ +## Load output to bq +tail +2 /tmp/peeringdb_metadata_prepare.csv > /tmp/peeringdb_metadata.csv + +bq load --autodetect "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).metadata" /tmp/peeringdb_metadata.csv asn:integer,name:string,website:string,email:string diff --git a/research/asn-data-pipeline/pg-init.d/06-bq-load-logs.sh b/research/asn-data-pipeline/pg-init.d/06-bq-load-logs.sh new file mode 100644 index 0000000..919fbc8 --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/06-bq-load-logs.sh @@ -0,0 +1,18 @@ +## Load logs to bq +if [ -z "${GCP_BIGQUERY_DATASET_LOGS:-}" ]; then + echo "Loading raw usage logs ourselves, since \$GCP_BIGQUERY_DATASET_LOGS was not provided"
asia.artifacts.k8s-artifacts-prod.appspot.com + eu.artifacts.k8s-artifacts-prod.appspot.com + k8s-artifacts-cni + k8s-artifacts-cri-tools + k8s-artifacts-csi + k8s-artifacts-gcslogs + k8s-artifacts-kind + k8s-artifacts-prod + us.artifacts.k8s-artifacts-prod.appspot.com + ) + for BUCKET in ${BUCKETS[*]}; do + bq load --autodetect --max_bad_records=2000 ${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).usage_all_raw gs://k8s-infra-artifacts-gcslogs/${BUCKET}_usage* || true + done +fi diff --git a/research/asn-data-pipeline/pg-init.d/07_bq_usage_data_transformation.sh b/research/asn-data-pipeline/pg-init.d/07_bq_usage_data_transformation.sh new file mode 100644 index 0000000..81c336b --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/07_bq_usage_data_transformation.sh @@ -0,0 +1,12 @@ +## Get single clientip as int. +export GCP_BIGQUERY_DATASET_WITH_DATE="${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" +if [ -n "${GCP_BIGQUERY_DATASET_LOGS:-}" ]; then + envsubst < /app/distinct_c_ip_count_logs.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.1_ip_count" +else + envsubst < /app/distinct_c_ip_count.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.1_ip_count" +fi +envsubst < /app/distinct_ip_int.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.2_ip_int" +envsubst < /app/distinct_ipint_only.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.2a_ip_int" +envsubst < /app/potaroo_extra_yaml_name_column.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.3_potaroo_with_yaml_name_column" +envsubst < /app/potaroo_yaml_name_subbed.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.4_potaroo_with_yaml_name_subbed" +envsubst < /app/vendor_with_company_name.sql | bq query --nouse_legacy_sql --replace 
--destination_table "${GCP_BIGQUERY_DATASET_WITH_DATE}.5_vendor_with_company_name" diff --git a/research/asn-data-pipeline/pg-init.d/08_download_c_ip_int.sh b/research/asn-data-pipeline/pg-init.d/08_download_c_ip_int.sh new file mode 100644 index 0000000..1e37aa6 --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/08_download_c_ip_int.sh @@ -0,0 +1,17 @@ +## Set a timestamp to work with +TIMESTAMP=$(date +%Y%m%d%H%M) +echo $TIMESTAMP > /tmp/my-timestamp.txt +## Dump the entire table to gcs +bq extract \ +--destination_format CSV \ +${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).2a_ip_int \ +gs://ii_bq_scratch_dump/2a_ip_inti-$TIMESTAMP-*.csv +## Download the files +TIMESTAMP=$(cat /tmp/my-timestamp.txt | tr -d '\n') +mkdir -p /tmp/usage_all_ip_only/ +gsutil cp \ +gs://ii_bq_scratch_dump/2a_ip_inti-$TIMESTAMP-*.csv \ +/tmp/usage_all_ip_only/ +## Merge the data +cat /tmp/usage_all_ip_only/*.csv | tail +2 > /tmp/usage_all_ip_only_1.csv +cat /tmp/usage_all_ip_only_1.csv | grep -v c_ip_int > /tmp/usage_all_ip_only.csv diff --git a/research/asn-data-pipeline/pg-init.d/09_download_expanded_ips.sh b/research/asn-data-pipeline/pg-init.d/09_download_expanded_ips.sh new file mode 100644 index 0000000..0e52291 --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/09_download_expanded_ips.sh @@ -0,0 +1,17 @@ +## Set a timestamp to work with +TIMESTAMP=$(date +%Y%m%d%H%M) +echo $TIMESTAMP > /tmp/my-timestamp.txt +## Dump the entire table to gcs +bq extract \ +--destination_format CSV \ +${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).5_vendor_with_company_name \ +gs://ii_bq_scratch_dump/vendor-$TIMESTAMP-*.csv +## Download the files +TIMESTAMP=$(cat /tmp/my-timestamp.txt | tr -d '\n') +mkdir -p /tmp/expanded_pyasn/ +gsutil cp \ +gs://ii_bq_scratch_dump/vendor-$TIMESTAMP-*.csv \ +/tmp/expanded_pyasn/ +## Merge the data +cat /tmp/expanded_pyasn/*.csv | tail +2 > /tmp/expanded_pyasn_1.csv +cat /tmp/expanded_pyasn_1.csv | grep -v cidr_ip > /tmp/expanded_pyasn.csv diff --git 
a/research/asn-data-pipeline/pg-init.d/10-load-single-ip-int.sql b/research/asn-data-pipeline/pg-init.d/10-load-single-ip-int.sql new file mode 100644 index 0000000..51bcd4f --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/10-load-single-ip-int.sql @@ -0,0 +1,10 @@ +-- Copy the customer ip in +copy cust_ip from '/tmp/usage_all_ip_only.csv'; +-- Copy pyasn expanded in +copy vendor_expanded_int from '/tmp/expanded_pyasn.csv' (DELIMITER(',')); +-- Indexes on the Data we are about to range +create index on vendor_expanded_int (end_ip_int); +create index on vendor_expanded_int (start_ip_int); +create index on cust_ip (c_ip); + +copy ( SELECT vendor_expanded_int.cidr_ip, vendor_expanded_int.start_ip, vendor_expanded_int.end_ip, vendor_expanded_int.asn, vendor_expanded_int.name_with_yaml_name, cust_ip.c_ip FROM vendor_expanded_int, cust_ip WHERE cust_ip.c_ip >= vendor_expanded_int.start_ip_int AND cust_ip.c_ip <= vendor_expanded_int.end_ip_int) TO '/tmp/match-ip-to-iprange.csv' CSV HEADER; diff --git a/research/asn-data-pipeline/pg-init.d/11-upload-ip-range-2-ip.sh b/research/asn-data-pipeline/pg-init.d/11-upload-ip-range-2-ip.sh new file mode 100644 index 0000000..e18f6a3 --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/11-upload-ip-range-2-ip.sh @@ -0,0 +1 @@ +bq load --autodetect ${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).6_ip_range_2_ip_lookup /tmp/match-ip-to-iprange.csv diff --git a/research/asn-data-pipeline/pg-init.d/12_add_c_ip_int_to_usage_all.sh b/research/asn-data-pipeline/pg-init.d/12_add_c_ip_int_to_usage_all.sh new file mode 100644 index 0000000..46a4f3f --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/12_add_c_ip_int_to_usage_all.sh @@ -0,0 +1,6 @@ +export GCP_BIGQUERY_DATASET_WITH_DATE="${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" +if [ -n "${GCP_BIGQUERY_DATASET_LOGS:-}" ]; then + envsubst < /app/add_c_ip_int_to_usage_all_no_logs.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date 
+%Y%m%d).usage_all_raw_int" +else + envsubst < /app/add_c_ip_int_to_usage_all.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).usage_all_raw_int" +fi diff --git a/research/asn-data-pipeline/pg-init.d/13_prepare_final_table.sh b/research/asn-data-pipeline/pg-init.d/13_prepare_final_table.sh new file mode 100644 index 0000000..9aca659 --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/13_prepare_final_table.sh @@ -0,0 +1,7 @@ +## Get single clientip as int. +export GCP_BIGQUERY_DATASET_WITH_DATE="${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d)" +if [ -n "${GCP_BIGQUERY_DATASET_LOGS:-}" ]; then + envsubst < /app/join_all_the_things_no_logs.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).7_asn_company_c_ip_lookup" +else + envsubst < /app/join_all_the_things.sql | bq query --nouse_legacy_sql --replace --destination_table "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).7_asn_company_c_ip_lookup" +fi diff --git a/research/asn-data-pipeline/pg-init.d/14-promote-bq-dataset-as-prod.sh b/research/asn-data-pipeline/pg-init.d/14-promote-bq-dataset-as-prod.sh new file mode 100644 index 0000000..8c9276f --- /dev/null +++ b/research/asn-data-pipeline/pg-init.d/14-promote-bq-dataset-as-prod.sh @@ -0,0 +1,6 @@ +for TABLE in $(bq ls ${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d) | awk '{print $1}' | tail +3 | xargs); do + echo "Removing table '${GCP_BIGQUERY_DATASET}.$TABLE'" + bq rm -f "${GCP_BIGQUERY_DATASET}.$TABLE"; + echo "Copying table '${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).$TABLE' to '${GCP_BIGQUERY_DATASET}.$TABLE'" + bq cp --noappend_table --nono_clobber -f "${GCP_BIGQUERY_DATASET}_$(date +%Y%m%d).$TABLE" "${GCP_BIGQUERY_DATASET}.$TABLE"; +done diff --git a/research/asn-data-pipeline/pg-init.d/15-stop-postgres.sh b/research/asn-data-pipeline/pg-init.d/15-stop-postgres.sh new file mode 100644 index 0000000..0dee154 --- /dev/null +++ 
b/research/asn-data-pipeline/pg-init.d/15-stop-postgres.sh @@ -0,0 +1,23 @@ +if [ ! "${ASN_DATA_PIPELINE_RETAIN:-}" = true ]; then + # in the Postgres container image, + # the command run changes to "postgres" once it's completed loading up + # and is in a ready state and all of the init scripts have run + # + # here we wait for that state and attempt to exit cleanly, without error + ( + # discover where the postgres process is, even if Prow has injected a PID 1 process + PARENTPID=$(ps -o ppid= -p $$ | awk '{print $1}') + echo MY PID :: $$ + echo PARENT PID :: $PARENTPID + PID=$$ + if [ ! "$(cat /proc/$PARENTPID/cmdline)" = "/tools/entrypoint" ] && [ ! $PARENTPID -eq 0 ]; then + PID=$PARENTPID + fi + ps aux + until [ "$(cat /proc/$PID/cmdline | tr '\0' '\n' | head -n 1)" = "postgres" ]; do + sleep 1s + done + # exit Postgres with a code of 0 + pg_ctl kill QUIT $PID + ) & +fi diff --git a/research/big-query.org b/research/big-query.org new file mode 100644 index 0000000..d9634dc --- /dev/null +++ b/research/big-query.org @@ -0,0 +1,44 @@ +#+TITLE: Big Query Sql-Mode SRC Blocks +* gcloud auth login first +Via tmate so we can interact +#+BEGIN_SRC tmate :window gcloud +gcloud auth login +#+END_SRC +* gcloud default to k8s-infra-ii-sandbox +#+BEGIN_SRC shell :results silent +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC +* start bq shell in tmate :window 'bq' +#+BEGIN_SRC tmate :window bq +bq shell +#+END_SRC +* start bq comint-shell in emacs *SQL* buffer + +This will create an SQLi (interactive) 'comint' buffer backed by a bq shell. +By default the name of the buffer is `*SQL*` + +#+BEGIN_SRC elisp +(sql-product-interactive 'bq) +#+END_SRC + +#+RESULTS: +#+begin_src elisp +# +#+end_src + +* Org Block Execution of Big Query SRC blocks + +This will also create an SQLi buffer, but it's default name is `*SQL: bq:none*` and you can have multiple sessions. 
+ +#+begin_src sql-mode :product bq +select 1+2; +#+end_src + +#+RESULTS: +#+begin_SRC example ++-----+ +| f0_ | ++-----+ +| 3 | ++-----+ +#+end_SRC diff --git a/research/big_query-scratch.org b/research/big_query-scratch.org new file mode 100644 index 0000000..d16cc29 --- /dev/null +++ b/research/big_query-scratch.org @@ -0,0 +1,48 @@ +#+TITLE: Big Query Scratch +Goal is a file that will be easy starting place for bq work +* Log in to gcloud +Login to gcloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC +Set the project +#+BEGIN_SRC tmate :window prepare +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC +* Make sure we are in the right project in bq +#+BEGIN_SRC tmate :window prepare +bq ls +#+END_SRC +* Run big query queries +*** Run in tmux terminal +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql 'SELECT c_ip, cs_bytes, sc_bytes, cs_referer, time_micros, client_ip, asn, name, name_with_yaml_name FROM (SELECT c_ip, cs_bytes, sc_bytes, cs_referer, time_micros FROM k8s-infra-ii-sandbox.etl_staging.usage_all_20210608 ) A LEFT OUTER JOIN ( SELECT client_ip, asn, name, name_with_yaml_name FROM k8s-infra-ii-sandbox.riaan_data_store.asn_client_ip_lookup ) B ON A.c_ip=B.client_ip LIMIT 10' +#+END_SRC +*** Run in the editor +WARNING WILL FREEZE THE EDITOR UNTIL THE QUERY RETURNS +#+BEGIN_SRC shell +bq query --nouse_legacy_sql 'SELECT c_ip FROM `k8s-infra-ii-sandbox.etl_staging.usage_all_20210608` LIMIT 10' +#+END_SRC + +#+RESULTS: +#+begin_example ++-----------------+ +| c_ip | ++-----------------+ +| 199.101.198.44 | +| 185.65.135.170 | +| 163.172.227.108 | +| 51.159.89.6 | +| 163.172.227.108 | +| 54.76.216.36 | +| 165.225.200.171 | +| 18.205.68.20 | +| 3.8.77.100 | +| 52.28.102.95 | ++-----------------+ +#+end_example + +*** Output to a table +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.asn_client_ip_lookup_org 'SELECT c_ip, cs_bytes, sc_bytes, cs_referer, time_micros, 
client_ip, asn, name, name_with_yaml_name FROM (SELECT c_ip, cs_bytes, sc_bytes, cs_referer, time_micros FROM k8s-infra-ii-sandbox.etl_staging.usage_all_20210608 ) A LEFT OUTER JOIN ( SELECT client_ip, asn, name, name_with_yaml_name FROM k8s-infra-ii-sandbox.riaan_data_store.asn_client_ip_lookup ) B ON A.c_ip=B.client_ip ' +#+END_SRC diff --git a/research/bq-multiline.org b/research/bq-multiline.org new file mode 100644 index 0000000..e68cbe2 --- /dev/null +++ b/research/bq-multiline.org @@ -0,0 +1,106 @@ +#+TITLE: Writing an SQL-Mode Filter + +* TLDR +** run this code +#+begin_src elisp :noweb yes :results silent +<<bq-src-block-args>> +<<org-babel-sql-mode-pre-execute-hook>> +#+end_src + +** multiline bq +#+begin_src sql-mode +select + 1; +#+end_src + +* Links/Documentation about sql-mode filters +** [[https://github.com/nikclayton/ob-sql-mode#ob-sql-mode][ob-sql-mode]] +- https://github.com/nikclayton/ob-sql-mode#ob-sql-mode +* Possibilities +** org-babel-sql-mode-pre-execute-hook +Hook for functions to be called before the query is executed. + +Each function is called with two parameters, BODY is the text of +the SQL statements to be run. PROCESSED-PARAMS is the parameters +to the code block. + +The hook should return a new BODY modified in some way. +https://github.com/nikclayton/ob-sql-mode/blob/master/ob-sql-mode.el#L70-L78 +** org-babel-sql-mode-start-interpreter-prompt +Useful to maybe start the interpreter without asking questions +https://github.com/nikclayton/ob-sql-mode/blob/master/ob-sql-mode.el#L147-L156 +* gcloud auth login +In kitty you will need to control-shift-click on the link, and paste the code back into the terminal. 
+#+begin_src tmate :window login +gcloud auth login +#+end_src +#+begin_src shell :results silent +gcloud config set project k8s-infra-ii-sandbox +#+end_src +#+begin_src shell :results silent +CREDFILE=$(ls ~/.config/gcloud/legacy_credentials/*/singlestore_bq.json) +echo credential_file = $CREDFILE > ~/.bigqueryrc +#+end_src + +* Setting local vars +** Defaults for sql-mode blocks +#+name: bq-src-block-args +#+begin_src elisp :results silent +(set (make-local-variable 'org-babel-default-header-args:sql-mode) + ;; Set up all sql-mode blocks to be postgres and literate + '((:results . "replace code") + (:product . "bq") + (:session . "none") + (:noweb . "yes") + (:comments . "no") + (:wrap . "SRC example"))) +#+end_src + +** pre-execute-hook +#+name: org-babel-sql-mode-pre-execute-hook +#+begin_src elisp +(setq org-babel-sql-mode-pre-execute-hook nil) +(defun bq-simplify-body (body params) + (message "BODY:") + (message body) + (let* ((oneline (replace-regexp-in-string "\n" "" body)) + ) + (message "ONELINE:") + (message oneline) + (symbol-value 'oneline) + ) + ;;(symbol-value 'body) + + ) +(add-hook 'org-babel-sql-mode-pre-execute-hook 'bq-simplify-body) +#+end_src + +#+RESULTS: +#+begin_src elisp +(bq-simplify-body) +#+end_src + +** Listing the hooks +#+begin_src elisp +(symbol-value 'org-babel-sql-mode-pre-execute-hook) +#+end_src + +#+RESULTS: +#+begin_src elisp +(bq-simplify-body) +#+end_src + +* SQL +#+begin_src sql-mode +select + 1; +#+end_src + +#+RESULTS: +#+begin_SRC example ++-----+ +| f0_ | ++-----+ +| 1 | ++-----+ +#+end_SRC diff --git a/research/dir-local-hooks.org b/research/dir-local-hooks.org index e69b9e4..63ef811 100644 --- a/research/dir-local-hooks.org +++ b/research/dir-local-hooks.org @@ -178,7 +178,7 @@ initializations for terminals that do not have a library. 
#+end_src ** dir/subdir/test.org #+begin_src org :tangle /tmp/dir/subdir/test.org - # -*- var: (message "file local variable prop line"); -*- + # -*- eval: (message "file local variable prop line"); -*- ,#+TITLE: testing to see what order the vars are executed in # Local Variables: diff --git a/research/distribution/README.org b/research/distribution/README.org new file mode 100644 index 0000000..f6fadb4 --- /dev/null +++ b/research/distribution/README.org @@ -0,0 +1,223 @@ +* Install Distribution (with fs) + +Create basic auth htpasswd: + #+begin_src bash :results silent + kubectl -n distribution create secret generic distribution-auth --from-literal=htpasswd="$(htpasswd -Bbn distribution Distritest1234!)" + #+end_src + +Configure the Distribution deployment: +#+begin_src yaml :tangle distribution-fs.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: distribution +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: distribution-config +data: + config.yml: | + version: 0.1 + log: + accesslog: + disabled: false + level: debug + fields: + service: registry + environment: development + hooks: + - type: mail + disabled: true + levels: + - panic + options: + smtp: + addr: mail.example.com:25 + username: mailuser + password: password + insecure: true + from: sender@example.com + to: + - errors@example.com + auth: + htpasswd: + realm: basic-realm + path: /etc/docker/registry/htpasswd + storage: + delete: + enabled: true + cache: + blobdescriptor: redis + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false + http: + addr: :5000 + secret: asecretforlocaldevelopment + debug: + addr: :5001 + prometheus: + enabled: true + path: /metrics + headers: + X-Content-Type-Options: [nosniff] + redis: + addr: localhost:6379 + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + notifications: + events: + includereferences: true + endpoints: + - name: local-5003 + url: 
http://localhost:5003/callback + headers: + Authorization: [Bearer ] + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + - name: local-8083 + url: http://localhost:8083/callback + timeout: 1s + threshold: 10 + backoff: 1s + disabled: true + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + proxy: + remoteurl: https://k8s.gcr.io +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: distribution-data + namespace: distribution +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: local-path +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distribution + namespace: distribution +spec: + replicas: 1 + selector: + matchLabels: + app: distribution + template: + metadata: + labels: + app: distribution + spec: + containers: + - name: distribution + image: docker.io/registry:2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 5000 + env: + - name: TZ + value: "Pacific/Auckland" + volumeMounts: + - name: distribution-data + mountPath: /var/lib/registry + - name: distribution-config + mountPath: /etc/docker/registry/config.yml + subPath: config.yml + - name: distribution-auth + mountPath: /etc/docker/registry/htpasswd + subPath: htpasswd + readinessProbe: + tcpSocket: + port: 5000 + initialDelaySeconds: 2 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 5000 + initialDelaySeconds: 1 + periodSeconds: 20 + volumes: + - name: distribution-data + persistentVolumeClaim: + claimName: distribution-data + - name: distribution-config + configMap: + name: distribution-config + - name: distribution-auth + secret: + secretName: distribution-auth +--- +apiVersion: v1 +kind: Service +metadata: + name: distribution + namespace: distribution +spec: + ports: + - port: 5000 + targetPort: 5000 + selector: + app: distribution +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: distribution + namespace: distribution + annotations: + 
nginx.ingress.kubernetes.io/rewrite-target: / + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/proxy-body-size: "0" +spec: + tls: + - hosts: + - registry.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod + rules: + - host: registry.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - path: / + backend: + serviceName: distribution + servicePort: 5000 +#+end_src + +Install a basic installation of Distribution: +#+begin_src bash :results silent +envsubst < distribution-fs.yaml | kubectl -n distribution apply -f - +#+end_src + +Restart the deployment rollout if needed: +#+BEGIN_SRC bash :results silent +kubectl -n distribution rollout restart deployment/distribution +#+END_SRC + +Copy the letsencrypt cert for Ingress: +#+begin_src bash :results silent +kubectl -n powerdns get cert letsencrypt-prod -o yaml | sed 's/namespace: powerdns/namespace: distribution/g' | kubectl -n distribution apply -f - +#+end_src + +Login to the registry: +#+begin_src bash :results silent +echo Distritest1234! 
| docker login registry.$SHARINGIO_PAIR_BASE_DNS_NAME -u distribution --password-stdin +#+end_src diff --git a/research/gcp-credentials-from-serviceaccount.org b/research/gcp-credentials-from-serviceaccount.org new file mode 100644 index 0000000..1d00835 --- /dev/null +++ b/research/gcp-credentials-from-serviceaccount.org @@ -0,0 +1,34 @@ +#+TITLE: Gcp Credentials From Serviceaccount + +Ensure that gcloud is logged into: +#+BEGIN_SRC shell +gcloud auth login +#+END_SRC + +Select the right project +#+BEGIN_SRC shell +read GCP_PROJECT +export GCP_PROJECT + +gcloud config set project $GCP_PROJECT +#+END_SRC + +Get the username for the new ServiceAccount +#+BEGIN_SRC shell +SHARINGIO_PAIR_USER_LOWER=$(echo $SHARINGIO_PAIR_USER | tr '[A-Z]' '[a-z]') +#+END_SRC + +Set the GCP ServiceAccount name +#+BEGIN_SRC shell +GCP_SERVICEACCOUNT_NAME="${SHARINGIO_PAIR_USER_LOWER}@${GCP_PROJECT}.iam.gserviceaccount.com" +#+END_SRC + +Create an owner rolebinding +#+BEGIN_SRC shell +gcloud projects add-iam-policy-binding $GCP_PROJECT --member="serviceAccount:${GCP_SERVICEACCOUNT_NAME}" --role="roles/owner" +#+END_SRC + +Create ServiceAccount keys +#+BEGIN_SRC shell +gcloud iam service-accounts keys create gcp-credentials.json --iam-account=$GCP_SERVICEACCOUNT_NAME +#+END_SRC diff --git a/research/gcp_auditing.org b/research/gcp_auditing.org new file mode 100644 index 0000000..ff4cce7 --- /dev/null +++ b/research/gcp_auditing.org @@ -0,0 +1,358 @@ +#+TITLE: Gcp Auditing +* Gcloud things: +Login to gcloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC + +Set the project +#+BEGIN_SRC tmate :window prepare +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC + +Find sandbox-project-id +#+BEGIN_SRC shell +gcloud projects list | grep k8s-infra-ii-sandbox +#+END_SRC + +#+RESULTS: +#+begin_example +k8s-infra-ii-sandbox k8s-infra-ii-sandbox 631771264409 +#+end_example + +* Config bulk-exporter +Run it and see what we get. 
+#+BEGIN_SRC tmate :window bulk-exporter-setup +gcloud alpha resource-config bulk-export --resource-format=terraform --project=631771264409 --path=~/gc_d_poc/sandbox_dump +#+END_SRC + +#+BEGIN_EXAMPLE +This command requires the `config-connector` binary to be installed to export GCP resource configurations. +Would you like to install the`config-connector` binary to continue command execution?(Y/n)? +ERROR:(gcloud.alpha.resource-config.bulk-export) +You cannot perform this action because the Cloud SDK component manager is disabled for this installation. +You can run the following command to achieve the same result for this installation: +#+END_EXAMPLE + +Update and install +#+BEGIN_SRC tmate :window bulk-exporter-setup +sudo apt-get update && sudo apt-get install google-cloud-sdk-config-connector +#+END_SRC + +Lets try again +#+BEGIN_SRC tmate :window bulk-exporter-setup +gcloud alpha resource-config bulk-export --resource-format=terraform --project=631771264409 --path=~/gc_d_poc/sandbox_dump +#+END_SRC +#+BEGIN_EXAMPLE +Exporting resource configurations to [/home/ii/gc_d_poc/sandbox_dump]...done. +ERROR: (gcloud.alpha.resource-config.bulk-export) Error executing export:: [error in 'config-connector' version '1.49.1': error creating temporary bucket and prefix: error getting project id: error retrieving gcp sdk credentials: google: could not find default credentials. See https://developers.google.com/accounts/docs/application-default-credentials for more information. 
+] +#+END_EXAMPLE + +I need to configure my application-default-credentials +#+BEGIN_SRC tmate :window bulk-exporter-setup +gcloud auth application-default login +#+END_SRC + +Lets try again +#+BEGIN_SRC tmate :window bulk-exporter-setup +gcloud alpha resource-config bulk-export --resource-format=terraform --project=631771264409 --path=~/gc_d_poc/sandbox_dump +#+END_SRC +Good that worked, a bit more detail on what I got back below +** A Little bit about the structure of the dump +It creates 3 folders that contain various parts of the project. +#+BEGIN_EXAMPLE +i@bernokl-humacs-0:~/gc_d_poc$ ls -al +total 12 +drwxr-xr-x 3 ii ii 4096 May 14 10:03 . +drwxrwxrwx 13 root root 4096 May 14 10:00 .. +drwxr-xr-x 5 ii ii 4096 May 14 11:45 sandbox_dump +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/ +total 20 +drwxr-xr-x 5 ii ii 4096 May 14 11:45 . +drwxr-xr-x 3 ii ii 4096 May 14 10:03 .. +drwxr-xr-x 3 ii ii 4096 May 14 11:45 758905017065 +drwxr-xr-x 3 ii ii 4096 May 14 11:44 k8s-infra-ii-sandbox +drwxr-xr-x 4 ii ii 4096 May 14 11:45 projects +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/758905017065/ +total 12 +drwxr-xr-x 3 ii ii 4096 May 14 11:45 . +drwxr-xr-x 5 ii ii 4096 May 14 11:45 .. +drwxr-xr-x 2 ii ii 4096 May 14 11:45 Project +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/758905017065/Project/ +total 12 +drwxr-xr-x 2 ii ii 4096 May 14 11:45 . +drwxr-xr-x 3 ii ii 4096 May 14 11:45 .. +-rw-r--r-- 1 ii ii 281 May 14 11:45 k8s-infra-ii-sandbox.tf +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/k8s-infra-ii-sandbox/ +total 12 +drwxr-xr-x 3 ii ii 4096 May 14 11:44 . +drwxr-xr-x 5 ii ii 4096 May 14 11:45 .. +drwxr-xr-x 3 ii ii 4096 May 14 11:44 BigQueryDataset +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/k8s-infra-ii-sandbox/BigQueryDataset/ | tail +total 12 +drwxr-xr-x 3 ii ii 4096 May 14 11:44 . +drwxr-xr-x 3 ii ii 4096 May 14 11:44 .. 
+drwxr-xr-x 2 ii ii 4096 May 14 11:44 US +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/k8s-infra-ii-sandbox/BigQueryDataset/US/ | tail +total 16 +drwxr-xr-x 2 ii ii 4096 May 14 11:44 . +drwxr-xr-x 3 ii ii 4096 May 14 11:44 .. +-rw-r--r-- 1 ii ii 592 May 14 11:44 k8s-artifacts-gcslogs-appspot.tf +-rw-r--r-- 1 ii ii 581 May 14 11:44 kubernetes-public-logs.tf +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/projects/ +total 16 +drwxr-xr-x 4 ii ii 4096 May 14 11:45 . +drwxr-xr-x 5 ii ii 4096 May 14 11:45 .. +drwxr-xr-x 4 ii ii 4096 May 14 11:45 631771264409 +drwxr-xr-x 10 ii ii 4096 May 14 11:45 k8s-infra-ii-sandbox +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/projects/631771264409/ +total 16 +drwxr-xr-x 4 ii ii 4096 May 14 11:45 . +drwxr-xr-x 4 ii ii 4096 May 14 11:45 .. +drwxr-xr-x 2 ii ii 4096 May 14 11:46 Service +drwxr-xr-x 4 ii ii 4096 May 14 11:46 StorageBucket +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/projects/631771264409/StorageBucket/ +total 16 +drwxr-xr-x 4 ii ii 4096 May 14 11:46 . +drwxr-xr-x 4 ii ii 4096 May 14 11:45 .. +drwxr-xr-x 2 ii ii 4096 May 14 11:46 US +drwxr-xr-x 2 ii ii 4096 May 14 11:45 US-EAST1 +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/projects/631771264409/StorageBucket/US/ +total 16 +drwxr-xr-x 2 ii ii 4096 May 14 11:46 . +drwxr-xr-x 4 ii ii 4096 May 14 11:46 .. 
+-rw-r--r-- 1 ii ii 309 May 14 11:46 artifacts-k8s-infra-ii-sandbox-appspot-com.tf +-rw-r--r-- 1 ii ii 279 May 14 11:46 export-c2e4nmc5jmg9n5nacc60.tf +ii@bernokl-humacs-0:~/gc_d_poc$ cat sandbox_dump/projects/631771264409/StorageBucket/US/artifacts-k8s-infra-ii-sandbox-appspot-com.tf + +resource "google_storage_bucket" "artifacts_k8s_infra_ii_sandbox_appspot_com" { + force_destroy = false + + labels { + managed-by-cnrm = "true" + } + + location = "US" + name = "artifacts.k8s-infra-ii-sandbox.appspot.com" + project = "projects/631771264409" + storage_class = "STANDARD" +} +ii@bernokl-humacs-0:~/gc_d_poc$ cat sandbox_dump/projects/631771264409/StorageBucket/US/export-c2e4nmc5jmg9n5nacc60.tf +resource "google_storage_bucket" "export_c2e4nmc5jmg9n5nacc60" { + force_destroy = false + + labels { + managed-by-cnrm = "true" + } + + location = "US" + name = "export-c2e4nmc5jmg9n5nacc60" + project = "projects/631771264409" + storage_class = "STANDARD" +} + +i@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/projects/k8s-infra-ii-sandbox/ +total 40 +drwxr-xr-x 10 ii ii 4096 May 14 11:45 . +drwxr-xr-x 4 ii ii 4096 May 14 11:45 .. +drwxr-xr-x 2 ii ii 4096 May 14 11:45 BigQueryTable +drwxr-xr-x 2 ii ii 4096 May 14 11:45 ComputeFirewall +drwxr-xr-x 2 ii ii 4096 May 14 11:45 ComputeImage +drwxr-xr-x 2 ii ii 4096 May 14 11:45 ComputeNetwork +drwxr-xr-x 2 ii ii 4096 May 14 11:45 ComputeRoute +drwxr-xr-x 27 ii ii 4096 May 14 11:46 ComputeSubnetwork +drwxr-xr-x 2 ii ii 4096 May 14 11:45 IAMServiceAccount +drwxr-xr-x 2 ii ii 4096 May 14 11:45 PubSubTopic +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/projects/k8s-infra-ii-sandbox/ComputeImage/ +total 12 +drwxr-xr-x 2 ii ii 4096 May 14 11:45 . +drwxr-xr-x 10 ii ii 4096 May 14 11:45 .. 
+-rw-r--r-- 1 ii ii 772 May 14 11:45 cluster-api-ubuntu-1804-v1-21-0-1620356019.tf +ii@bernokl-humacs-0:~/gc_d_poc$ cat sandbox_dump/projects/k8s-infra-ii-sandbox/ComputeImage/cluster-api-ubuntu-1804-v1-21-0-1620356019.tf +resource "google_compute_image" "cluster_api_ubuntu_1804_v1_21_0_1620356019" { + description = "Created by Packer" + disk_size_gb = 20 + family = "capi-ubuntu-1804-k8s-v1-21" + + guest_os_features { + type = "SEV_CAPABLE" + } + + guest_os_features { + type = "UEFI_COMPATIBLE" + } + + guest_os_features { + type = "VIRTIO_SCSI_MULTIQUEUE" + } + + labels { + managed-by-cnrm = "true" + } + + licenses = ["https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/licenses/ubuntu-1804-lts"] + name = "cluster-api-ubuntu-1804-v1-21-0-1620356019" + project = "k8s-infra-ii-sandbox" + source_disk = "https://www.googleapis.com/compute/v1/projects/k8s-infra-ii-sandbox/zones/us-central1-a/disks/packer-6094abb4-85d2-3b5d-0690-2c5c59a6c585" +} +ii@bernokl-humacs-0:~/gc_d_poc$ ls -al sandbox_dump/projects/631771264409/StorageBucket/US-EAST1/ +total 12 +drwxr-xr-x 2 ii ii 4096 May 14 11:45 . +drwxr-xr-x 4 ii ii 4096 May 14 11:46 .. 
+-rw-r--r-- 1 ii ii 393 May 14 11:45 ii-bq-scratch-dump.tf +ii@bernokl-humacs-0:~/gc_d_poc$ cat sandbox_dump/projects/631771264409/StorageBucket/US-EAST1/ii-bq-scratch-dump.tf +resource "google_storage_bucket" "ii_bq_scratch_dump" { + bucket_policy_only = true + force_destroy = false + + labels { + managed-by-cnrm = "true" + } + + location = "US-EAST1" + name = "ii_bq_scratch_dump" + project = "projects/631771264409" + storage_class = "STANDARD" + uniform_bucket_level_access = true +} + +#+END_EXAMPLE + +** Scripting bulk-export to run against multiple projects and create folders for each +Very basic confirm +#+BEGIN_SRC tmate :window bulk-exporter-setup +#PROJ=k8s-infra-ii-sandbox +PROJ=k8s-artifacts-prod +DEST=`pwd`/$PROJ +mkdir $DEST +gcloud alpha resource-config bulk-export --resource-format=terraform --project=$PROJ --path=$DEST +#+END_SRC + +Create a file with all our projects +#+BEGIN_SRC tmate :window bulk-exporter-setup +gcloud projects list | awk '{ print $1}' | grep -v bosk | grep -v PROJ | grep -v apisnoo | grep -v "sys-" > /tmp/gcloud_projects.txt +#+END_SRC + +Lets make a 3 org subset for testing: +#+BEGIN_SRC tmate :window bulk-export-setup +head -3 /tmp/gcloud_projects.txt > /tmp/gcloud_projects_short.txt +#+END_SRC + +#+BEGIN_SRC shell :tangle (concat (getenv "HOME") "/gc_d_poc/bulk-exporter.sh") +#!/bin/bash +FNAME='/tmp/gcloud_projects_short.txt' +n=1 +while read line; do +DEST=`pwd`/$line + echo -e 'project:' $line '\n' + echo -e 'dir:' $DEST '\n' + echo -e 'Count:' $n '\n' + echo -e '\n' +mkdir -p $DEST + echo 'y' | gcloud alpha resource-config bulk-export --resource-format=terraform --project=$line --path=$DEST +n=$((n+1)) +done < $FNAME +#+END_SRC + + +#+BEGIN_SRC tmate :window bulk-export-setup +chmod +x ~/gc_d_poc/bulk-exporter.sh +#+END_SRC +Lets run it! +#+BEGIN_SRC tmate :window bulk-export-setup +. 
~/gc_d_poc/bulk-exporter.sh +#+END_SRC + +Script seems to work, but we have permissions issues +I raised the issue on: https://github.com/kubernetes/k8s.io/issues/1981 + +tldr; we need cloudasset.googleapis.com enabled +#+BEGIN_EXAMPLE +gcloud alpha resource-config bulk-export --resource-format=terraform --project=$PROJ --path=$DEST +API [cloudasset.googleapis.com] is required to continue, but is not +enabled on project [k8s-artifacts-prod]. Would you like to enable and +retry (this will take a few minutes)? (y/N)? y + +Enabling service [cloudasset.googleapis.com] on project [k8s-artifacts-prod]... +ERROR: (gcloud.alpha.resource-config.bulk-export) PERMISSION_DENIED: Permission denied to enable service [cloudasset.googleapis.com] +- '@type': type.googleapis.com/google.rpc.PreconditionFailure + violations: + - subject: '110002' + type: googleapis.com +- '@type': type.googleapis.com/google.rpc.ErrorInfo + domain: serviceusage.googleapis.com + reason: AUTH_PERMISSION_DENIED +#+END_EXAMPLE +** Enable cloudasset on staging-apisnoop +We "control" 2 projects k8s-staging-apisnoop and k8s-infra-ii-sandbox. 
+Test creating pr for permissions needed on apisnoop +Result would be to run the bulk export against staging-apisnoop +When asked for cloudasset follow existing process to create pr for permissions update on our project + +*** Confirm current project services available +k8s-infra-ii-sandbox +#+begin_src shell +gcloud services list --project=k8s-infra-ii-sandbox +#+end_src +Notice cloudasset.googleapis.com in the results +#+RESULTS: +#+begin_example +NAME TITLE +bigquery.googleapis.com BigQuery API +bigqueryconnection.googleapis.com BigQuery Connection API +bigquerydatatransfer.googleapis.com BigQuery Data Transfer API +bigqueryreservation.googleapis.com BigQuery Reservation API +bigquerystorage.googleapis.com BigQuery Storage API +cloudasset.googleapis.com Cloud Asset API +cloudbuild.googleapis.com Cloud Build API +cloudresourcemanager.googleapis.com Cloud Resource Manager API +compute.googleapis.com Compute Engine API +container.googleapis.com Kubernetes Engine API +containeranalysis.googleapis.com Container Analysis API +containerregistry.googleapis.com Container Registry API +iam.googleapis.com Identity and Access Management (IAM) API +iamcredentials.googleapis.com IAM Service Account Credentials API +logging.googleapis.com Cloud Logging API +monitoring.googleapis.com Cloud Monitoring API +oslogin.googleapis.com Cloud OS Login API +pubsub.googleapis.com Cloud Pub/Sub API +storage-api.googleapis.com Google Cloud Storage JSON API +storage-component.googleapis.com Cloud Storage +#+end_example + +k8s-staging-apisnoop +#+begin_src shell +gcloud services list --project=k8s-staging-apisnoop +#+end_src + +Notice no cloudasset. 
+#+RESULTS: +#+begin_example +NAME TITLE +cloudbuild.googleapis.com Cloud Build API +cloudkms.googleapis.com Cloud Key Management Service (KMS) API +containeranalysis.googleapis.com Container Analysis API +containerregistry.googleapis.com Container Registry API +containerscanning.googleapis.com Container Scanning API +logging.googleapis.com Cloud Logging API +pubsub.googleapis.com Cloud Pub/Sub API +secretmanager.googleapis.com Secret Manager API +storage-api.googleapis.com Google Cloud Storage JSON API +storage-component.googleapis.com Cloud Storage +#+end_example + + + +#+BEGIN_SRC tmate :window bulk-exporter-test +#PROJ=k8s-infra-ii-sandbox +PROJ=k8s-staging-apisnoop +DEST=~/foo/$PROJ +mkdir -p $DEST +gcloud alpha resource-config bulk-export --resource-format=terraform --project=$PROJ --path=$DEST +#+END_SRC + +OK this asked me if I want to enable cloudasset.googleapis.com +I want to find the right way to have this service enabled in my project +Looking through k8s.io to see what exists for adding a resource to a project +Looking in gcloud docs to find best way to add/enable service for a project diff --git a/research/harbor/README.org b/research/harbor/README.org new file mode 100644 index 0000000..182e4f2 --- /dev/null +++ b/research/harbor/README.org @@ -0,0 +1,782 @@ +#+TITLE: Install Harbor using Helm on a pair instance + +#+BEGIN_SRC bash :results silent +helm repo add fluxcd https://charts.fluxcd.io +kubectl apply -f https://raw.githubusercontent.com/fluxcd/helm-operator/1.2.0/deploy/crds.yaml +# Only Helm 3 support enabled using helm.versions +helm upgrade -i helm-operator fluxcd/helm-operator \ + --namespace helm-operator \ + --create-namespace \ + --set helm.versions=v3 +#+END_SRC + +#+BEGIN_SRC yaml :tangle ./harbor.yaml +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: harbor + namespace: harbor + chart: + repository: https://helm.goharbor.io + name: harbor + version: 1.6.0 + values: + expose: + # Set the way how to expose the 
service. Set the type as "ingress", + # "clusterIP", "nodePort" or "loadBalancer" and fill the information + # in the corresponding section + type: ingress + tls: + # Enable the tls or not. + # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress" + # Note: if the "expose.type" is "ingress" and the tls + # is disabled, the port must be included in the command when pull/push + # images. Refer to https://github.com/goharbor/harbor/issues/5291 + # for the detail. + enabled: true + # The source of the tls certificate. Set it as "auto", "secret" + # or "none" and fill the information in the corresponding section + # 1) auto: generate the tls certificate automatically + # 2) secret: read the tls certificate from the specified secret. + # The tls certificate can be generated manually or by cert manager + # 3) none: configure no tls certificate for the ingress. If the default + # tls certificate is configured in the ingress controller, choose this option + certSource: secret + auto: + # The common name used to generate the certificate, it's necessary + # when the type isn't "ingress" + commonName: "" + secret: + # The name of secret which contains keys named: + # "tls.crt" - the certificate + # "tls.key" - the private key + secretName: "letsencrypt-prod" + # The name of secret which contains keys named: + # "tls.crt" - the certificate + # "tls.key" - the private key + # Only needed when the "expose.type" is "ingress". + notarySecretName: "letsencrypt-prod" + ingress: + hosts: + core: harbor.${SHARINGIO_PAIR_BASE_DNS_NAME} + notary: notary.harbor.${SHARINGIO_PAIR_BASE_DNS_NAME} + # set to the type of ingress controller if it has specific requirements. + # leave as `default` for most ingress controllers. 
+ # set to `gce` if using the GCE ingress controller + # set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller + controller: default + annotations: + # note different ingress controllers may require a different ssl-redirect annotation + # for Envoy, use ingress.kubernetes.io/force-ssl-redirect: "true" and remove the nginx lines below + ingress.kubernetes.io/ssl-redirect: "true" + ingress.kubernetes.io/proxy-body-size: "0" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/proxy-body-size: "0" + clusterIP: + # The name of ClusterIP service + name: harbor + ports: + # The service port Harbor listens on when serving with HTTP + httpPort: 80 + # The service port Harbor listens on when serving with HTTPS + httpsPort: 443 + # The service port Notary listens on. Only needed when notary.enabled + # is set to true + notaryPort: 4443 + nodePort: + # The name of NodePort service + name: harbor + ports: + http: + # The service port Harbor listens on when serving with HTTP + port: 80 + # The node port Harbor listens on when serving with HTTP + nodePort: 30002 + https: + # The service port Harbor listens on when serving with HTTPS + port: 443 + # The node port Harbor listens on when serving with HTTPS + nodePort: 30003 + # Only needed when notary.enabled is set to true + notary: + # The service port Notary listens on + port: 4443 + # The node port Notary listens on + nodePort: 30004 + loadBalancer: + # The name of LoadBalancer service + name: harbor + # Set the IP if the LoadBalancer supports assigning IP + IP: "" + ports: + # The service port Harbor listens on when serving with HTTP + httpPort: 80 + # The service port Harbor listens on when serving with HTTPS + httpsPort: 443 + # The service port Notary listens on. Only needed when notary.enabled + # is set to true + notaryPort: 4443 + annotations: {} + sourceRanges: [] + + # The external URL for Harbor core service. 
It is used to + # 1) populate the docker/helm commands showed on portal + # 2) populate the token service URL returned to docker/notary client + # + # Format: protocol://domain[:port]. Usually: + # 1) if "expose.type" is "ingress", the "domain" should be + # the value of "expose.ingress.hosts.core" + # 2) if "expose.type" is "clusterIP", the "domain" should be + # the value of "expose.clusterIP.name" + # 3) if "expose.type" is "nodePort", the "domain" should be + # the IP address of k8s node + # + # If Harbor is deployed behind the proxy, set it as the URL of proxy + externalURL: https://harbor.${SHARINGIO_PAIR_BASE_DNS_NAME} + + # The internal TLS used for harbor components secure communicating. In order to enable https + # in each components tls cert files need to provided in advance. + internalTLS: + # If internal TLS enabled + enabled: false + # There are three ways to provide tls + # 1) "auto" will generate cert automatically + # 2) "manual" need provide cert file manually in following value + # 3) "secret" internal certificates from secret + certSource: "auto" + # The content of trust ca, only available when `certSource` is "manual" + trustCa: "" + # core related cert configuration + core: + # secret name for core's tls certs + secretName: "" + # Content of core's TLS cert file, only available when `certSource` is "manual" + crt: "" + # Content of core's TLS key file, only available when `certSource` is "manual" + key: "" + # jobservice related cert configuration + jobservice: + # secret name for jobservice's tls certs + secretName: "" + # Content of jobservice's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of jobservice's TLS key file, only available when `certSource` is "manual" + key: "" + # registry related cert configuration + registry: + # secret name for registry's tls certs + secretName: "" + # Content of registry's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of registry's TLS 
key file, only available when `certSource` is "manual" + key: "" + # portal related cert configuration + portal: + # secret name for portal's tls certs + secretName: "" + # Content of portal's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of portal's TLS key file, only available when `certSource` is "manual" + key: "" + # chartmuseum related cert configuration + chartmuseum: + # secret name for chartmuseum's tls certs + secretName: "" + # Content of chartmuseum's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of chartmuseum's TLS key file, only available when `certSource` is "manual" + key: "" + # trivy related cert configuration + trivy: + # secret name for trivy's tls certs + secretName: "" + # Content of trivy's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of trivy's TLS key file, only available when `certSource` is "manual" + key: "" + + # The persistence is enabled by default and a default StorageClass + # is needed in the k8s cluster to provision volumes dynamicly. + # Specify another StorageClass in the "storageClass" or set "existingClaim" + # if you have already existing persistent volumes to use + # + # For storing images and charts, you can also use "azure", "gcs", "s3", + # "swift" or "oss". Set it in the "imageChartStorage" section + persistence: + enabled: true + # Setting it to "keep" to avoid removing PVCs during a helm delete + # operation. Leaving it empty will delete PVCs after the chart deleted + # (this does not apply for PVCs that are created for internal database + # and redis components, i.e. they are never deleted automatically) + resourcePolicy: "keep" + persistentVolumeClaim: + registry: + # Use the existing PVC which must be created manually before bound, + # and specify the "subPath" if the PVC is shared with other components + existingClaim: "" + # Specify the "storageClass" used to provision the volume. 
Or the default + # StorageClass will be used(the default). + # Set it to "-" to disable dynamic provisioning + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + chartmuseum: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + jobservice: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 1Gi + # If external database is used, the following settings for database will + # be ignored + database: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 1Gi + # If external Redis is used, the following settings for Redis will + # be ignored + redis: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 1Gi + trivy: + existingClaim: "" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + # Define which storage backend is used for registry and chartmuseum to store + # images and charts. Refer to + # https://github.com/docker/distribution/blob/master/docs/configuration.md#storage + # for the detail. + imageChartStorage: + # Specify whether to disable `redirect` for images and chart storage, for + # backends which not supported it (such as using minio for `s3` storage type), please disable + # it. To disable redirects, simply set `disableredirect` to `true` instead. + # Refer to + # https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect + # for the detail. + disableredirect: false + # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate. + # The secret must contain keys named "ca.crt" which will be injected into the trust store + # of registry's and chartmuseum's containers. + # caBundleSecretName: + + # Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift", + # "oss" and fill the information needed in the corresponding section. 
The type + # must be "filesystem" if you want to use persistent volumes for registry + # and chartmuseum + type: filesystem + filesystem: + rootdirectory: /storage + #maxthreads: 100 + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + #realm: core.windows.net + gcs: + bucket: bucketname + # The base64 encoded json file which contains the key + encodedkey: base64-encoded-json-key-file + #rootdirectory: /gcs/object/name/prefix + #chunksize: "5242880" + s3: + region: us-west-1 + bucket: bucketname + #accesskey: awsaccesskey + #secretkey: awssecretkey + #regionendpoint: http://myobjects.local + #encrypt: false + #keyid: mykeyid + #secure: true + #skipverify: false + #v4auth: true + #chunksize: "5242880" + #rootdirectory: /s3/object/name/prefix + #storageclass: STANDARD + #multipartcopychunksize: "33554432" + #multipartcopymaxconcurrency: 100 + #multipartcopythresholdsize: "33554432" + swift: + authurl: https://storage.myprovider.com/v3/auth + username: username + password: password + container: containername + #region: fr + #tenant: tenantname + #tenantid: tenantid + #domain: domainname + #domainid: domainid + #trustid: trustid + #insecureskipverify: false + #chunksize: 5M + #prefix: + #secretkey: secretkey + #accesskey: accesskey + #authversion: 3 + #endpointtype: public + #tempurlcontainerkey: false + #tempurlmethods: + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: regionname + bucket: bucketname + #endpoint: endpoint + #internal: false + #encrypt: false + #secure: true + #chunksize: 10M + #rootdirectory: rootdirectory + + imagePullPolicy: IfNotPresent + + # Use this set to assign a list of default pullSecrets + imagePullSecrets: + # - name: docker-registry-secret + # - name: internal-registry-secret + + # The update strategy for deployments with persistent volumes(jobservice, registry + # and chartmuseum): "RollingUpdate" or "Recreate" + # Set it as "Recreate" when "RWM" for volumes 
isn't supported + updateStrategy: + type: RollingUpdate + + # debug, info, warning, error or fatal + logLevel: info + + # The initial password of Harbor admin. Change it from portal after launching Harbor + harborAdminPassword: "Harbor12345" + + # The name of the secret which contains key named "ca.crt". Setting this enables the + # download link on portal to download the certificate of CA when the certificate isn't + # generated automatically + caSecretName: "" + + # The secret key used for encryption. Must be a string of 16 chars. + secretKey: "not-a-secure-key" + + # The proxy settings for updating trivy vulnerabilities from the Internet and replicating + # artifacts from/to the registries that cannot be reached directly + proxy: + httpProxy: + httpsProxy: + noProxy: 127.0.0.1,localhost,.local,.internal + components: + - core + - jobservice + - trivy + + # The custom ca bundle secret, the secret must contain key named "ca.crt" + # which will be injected into the trust store for chartmuseum, core, jobservice, registry, trivy components + # caBundleSecretName: "" + + ## UAA Authentication Options + # If you're using UAA for authentication behind a self-signed + # certificate you will need to provide the CA Cert. + # Set uaaSecretName below to provide a pre-created secret that + # contains a base64 encoded CA Certificate named `ca.crt`. 
+ # uaaSecretName: + + # If expose the service via "ingress", the Nginx will not be used + nginx: + image: + repository: goharbor/nginx-photon + tag: v2.2.0 + # set the service account to be used, default if left empty + serviceAccountName: "" + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + + portal: + image: + repository: goharbor/harbor-portal + tag: v2.2.0 + # set the service account to be used, default if left empty + serviceAccountName: "" + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + + core: + image: + repository: goharbor/harbor-core + tag: v2.2.0 + # set the service account to be used, default if left empty + serviceAccountName: "" + replicas: 1 + ## Startup probe values + startupProbe: + enabled: true + initialDelaySeconds: 10 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + # Secret is used when core server communicates with other components. + # If a secret key is not specified, Helm will generate one. + # Must be a string of 16 chars. + secret: "" + # Fill the name of a kubernetes secret if you want to use your own + # TLS certificate and private key for token encryption/decryption. + # The secret must contain keys named: + # "tls.crt" - the certificate + # "tls.key" - the private key + # The default key pair will be used if it isn't set + secretName: "" + # The XSRF key. 
Will be generated automatically if it isn't specified + xsrfKey: "" + + jobservice: + image: + repository: goharbor/harbor-jobservice + tag: v2.2.0 + replicas: 1 + # set the service account to be used, default if left empty + serviceAccountName: "" + maxJobWorkers: 10 + # The logger for jobs: "file", "database" or "stdout" + jobLoggers: + - file + # - database + # - stdout + + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + # Secret is used when job service communicates with other components. + # If a secret key is not specified, Helm will generate one. + # Must be a string of 16 chars. + secret: "" + + registry: + # set the service account to be used, default if left empty + serviceAccountName: "" + registry: + image: + repository: goharbor/registry-photon + tag: v2.2.0 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + controller: + image: + repository: goharbor/harbor-registryctl + tag: v2.2.0 + + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + replicas: 1 + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + # Secret is used to secure the upload state from client + # and registry storage backend. + # See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http + # If a secret key is not specified, Helm will generate one. + # Must be a string of 16 chars. + secret: "" + # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. + relativeurls: false + credentials: + username: "harbor_registry_user" + password: "harbor_registry_password" + # If you update the username or password of registry, make sure use cli tool htpasswd to generate the bcrypt hash + # e.g. 
"htpasswd -nbBC10 $username $password" + # htpasswd: "harbor_registry_user:$2y$10$9L4Tc0DJbFFMB6RdSCunrOpTHdwhid4ktBJmLD00bYgqkkGOvll3m" + + middleware: + enabled: false + type: cloudFront + cloudFront: + baseurl: example.cloudfront.net + keypairid: KEYPAIRID + duration: 3000s + ipfilteredby: none + # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key + # that allows access to CloudFront + privateKeySecret: "my-secret" + + chartmuseum: + enabled: true + # set the service account to be used, default if left empty + serviceAccountName: "" + # Harbor defaults ChartMuseum to returning relative urls, if you want using absolute url you should enable it by change the following value to 'true' + absoluteUrl: false + image: + repository: goharbor/chartmuseum-photon + tag: v2.2.0 + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + + trivy: + # enabled the flag to enable Trivy scanner + enabled: true + image: + # repository the repository for Trivy adapter image + repository: goharbor/trivy-adapter-photon + # tag the tag for Trivy adapter image + tag: v2.2.0 + # set the service account to be used, default if left empty + serviceAccountName: "" + # replicas the number of Pod replicas + replicas: 1 + # debugMode the flag to enable Trivy debug mode with more verbose scanning log + debugMode: false + # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`. 
+ vulnType: "os,library" + # severity a comma-separated list of severities to be checked + severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL" + # ignoreUnfixed the flag to display only fixed vulnerabilities + ignoreUnfixed: false + # insecure the flag to skip verifying registry certificate + insecure: false + # gitHubToken the GitHub access token to download Trivy DB + # + # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases. + # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached + # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update + # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one. + # Currently, the database is updated every 12 hours and published as a new release to GitHub. + # + # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough + # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000 + # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult + # https://developer.github.com/v3/#rate-limiting + # + # You can create a GitHub token by following the instructions in + # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line + gitHubToken: "" + # skipUpdate the flag to disable Trivy DB downloads from GitHub + # + # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues. + # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the + # `/home/scanner/.cache/trivy/db/trivy.db` path. 
+ skipUpdate: false + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 1 + memory: 1Gi + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + + notary: + enabled: true + server: + # set the service account to be used, default if left empty + serviceAccountName: "" + image: + repository: goharbor/notary-server-photon + tag: v2.2.0 + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + signer: + # set the service account to be used, default if left empty + serviceAccountName: "" + image: + repository: goharbor/notary-signer-photon + tag: v2.2.0 + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + # Fill the name of a kubernetes secret if you want to use your own + # TLS certificate authority, certificate and private key for notary + # communications. + # The secret must contain keys named ca.crt, tls.crt and tls.key that + # contain the CA, certificate and private key. + # They will be generated if not set. 
+ secretName: "" + + database: + # if external database is used, set "type" to "external" + # and fill the connection informations in "external" section + type: internal + internal: + # set the service account to be used, default if left empty + serviceAccountName: "" + image: + repository: goharbor/harbor-db + tag: v2.2.0 + # The initial superuser password for internal database + password: "changeit" + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + external: + host: "192.168.0.1" + port: "5432" + username: "user" + password: "password" + coreDatabase: "registry" + notaryServerDatabase: "notary_server" + notarySignerDatabase: "notary_signer" + # "disable" - No SSL + # "require" - Always SSL (skip verification) + # "verify-ca" - Always SSL (verify that the certificate presented by the + # server was signed by a trusted CA) + # "verify-full" - Always SSL (verify that the certification presented by the + # server was signed by a trusted CA and the server host name matches the one + # in the certificate) + sslmode: "disable" + # The maximum number of connections in the idle connection pool. + # If it <=0, no idle connections are retained. + maxIdleConns: 50 + # The maximum number of open connections to the database. + # If it <= 0, then there is no limit on the number of open connections. + # Note: the default number of connections is 1024 for postgre of harbor. 
+ maxOpenConns: 1000 + ## Additional deployment annotations + podAnnotations: {} + + redis: + # if external Redis is used, set "type" to "external" + # and fill the connection informations in "external" section + type: internal + internal: + # set the service account to be used, default if left empty + serviceAccountName: "" + image: + repository: goharbor/redis-photon + tag: v2.2.0 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + external: + # support redis, redis+sentinel + # addr for redis: : + # addr for redis+sentinel: :,:,: + addr: "192.168.0.2:6379" + # The name of the set of Redis instances to monitor, it must be set to support redis+sentinel + sentinelMasterSet: "" + # The "coreDatabaseIndex" must be "0" as the library Harbor + # used doesn't support configuring it + coreDatabaseIndex: "0" + jobserviceDatabaseIndex: "1" + registryDatabaseIndex: "2" + chartmuseumDatabaseIndex: "3" + trivyAdapterIndex: "5" + password: "" + ## Additional deployment annotations + podAnnotations: {} + + exporter: + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + podAnnotations: {} + serviceAccountName: "" + image: + repository: goharbor/harbor-exporter + tag: v2.2.0 + nodeSelector: {} + tolerations: [] + affinity: {} + cacheDuration: 30 + cacheCleanInterval: 14400 + + metrics: + enabled: false + core: + path: /metrics + port: 8001 + registry: + path: /metrics + port: 8001 + exporter: + path: /metrics + port: 8001 + +#+END_SRC + +#+begin_src bash :results silent +kubectl create ns harbor +#+end_src + +#+begin_src bash :results silent +envsubst < harbor.yaml | kubectl -n harbor apply -f - +#+end_src + +#+begin_src bash :results silent +kubectl -n powerdns get cert letsencrypt-prod -o yaml | sed 's/namespace: powerdns/namespace: harbor/g' | kubectl -n harbor apply -f - +#+end_src diff --git a/research/ii-gcp-capi.org b/research/ii-gcp-capi.org new file mode 100644 index 0000000..12f8f23 
--- /dev/null +++ b/research/ii-gcp-capi.org @@ -0,0 +1,167 @@ +#+TITLE: ii GCP CAPI + +* Image builder + +Clone +#+BEGIN_SRC shell +git clone https://github.com/kubernetes-sigs/image-builder +cd image-builder/images/capi +#+END_SRC + +Install dependencies +#+BEGIN_SRC +sudo apt update +sudo apt install -y ansible +make deps-gce +#+END_SRC + +Ensure Kubernetes v1.21.0 +#+BEGIN_SRC shell +find packer/config/ -type f -exec sed -i -e 's/1.18.15/1.21.0/g' {} \; +find packer/config/ -type f -exec sed -i -e 's/1.18/1.21/g' {} \; +#+END_SRC + +Set Env +#+BEGIN_SRC shell +read GCP_PROJECT_ID +export GCP_PROJECT_ID + +export GOOGLE_APPLICATION_CREDENTIALS=$HOME/gcp-credentials.json +#+END_SRC + +Build GCE image +#+BEGIN_SRC +make build-gce-ubuntu-1804 +#+END_SRC + +The image will now be available as something like /projects/$GCP_PROJECT_ID/global/images/cluster-api-ubuntu-1804-v1-21-0-1620356019/ + +* Deploying + +#+BEGIN_SRC yaml :tangle ~/cluster-template.yaml +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: GCPCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: GCPCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + project: "${GCP_PROJECT}" + region: "${GCP_REGION}" + network: + name: "${GCP_NETWORK_NAME}" +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + infrastructureTemplate: + kind: GCPMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + initConfiguration: + 
nodeRegistration: + name: '{{ ds.meta_data.local_hostname.split(".")[0] }}' + kubeletExtraArgs: + cloud-provider: gce + clusterConfiguration: + apiServer: + timeoutForControlPlane: 20m + extraArgs: + cloud-provider: gce + controllerManager: + extraArgs: + cloud-provider: gce + allocate-node-cidrs: "false" + joinConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.local_hostname.split(".")[0] }}' + kubeletExtraArgs: + cloud-provider: gce + version: "${KUBERNETES_VERSION}" +--- +kind: GCPMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + instanceType: "${GCP_CONTROL_PLANE_MACHINE_TYPE}" + image: "${GCP_IMAGE_SELFLINK}" +--- +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + spec: + clusterName: "${CLUSTER_NAME}" + failureDomain: "${GCP_REGION}-a" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-md-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 + kind: GCPMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 +kind: GCPMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + instanceType: "${GCP_NODE_MACHINE_TYPE}" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-md-0" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.local_hostname.split(".")[0] }}' + kubeletExtraArgs: + cloud-provider: gce +#+END_SRC + +Prepare Env +#+BEGIN_SRC shell +export GCP_CONTROL_PLANE_MACHINE_TYPE=e2-standard-2 GCP_NETWORK_NAME=default GCP_NODE_MACHINE_TYPE=e2-standard-2 
GCP_PROJECT=$GCP_PROJECT_ID GCP_REGION=us-east1 GCP_IMAGE_SELFLINK=projects/$GCP_PROJECT_ID/global/images/cluster-api-ubuntu-1804-v1-21-0-1620356019 +#+END_SRC + +Template +#+BEGIN_SRC shell +clusterctl config cluster --from ~/cluster-template.yaml ii-sandbox-test --kubernetes-version v1.21.0 --control-plane-machine-count=1 --worker-machine-count=0 --target-namespace=gcp-test > ii-sandbox-test.yaml +#+END_SRC diff --git a/research/iiusb/Dockerfile b/research/iiusb/Dockerfile new file mode 100644 index 0000000..de505b2 --- /dev/null +++ b/research/iiusb/Dockerfile @@ -0,0 +1,146 @@ +FROM iiusb:base +RUN echo 'Acquire::http { Proxy "http://192.168.1.15:8000"; }' \ + | sudo tee -a /etc/apt/apt.conf.d/proxy \ + && export DEBIAN_FRONTEND=noninteractive \ + && locale-gen --purge en_US.UTF-8 \ + && sed -i s:restricted:restricted\ universe: /etc/apt/sources.list \ + && apt-get update \ + && apt-get install -yq \ + apt-file \ + apt-transport-https \ + aptitude \ + awesome \ + autoconf \ + build-essential \ + byzanz \ + ca-certificates \ + debian-goodies \ + curl \ + figlet \ + git \ + gpg \ + gtk-redshift \ + htop \ + jq \ + kvmtool \ + linux-headers-generic \ + locales \ + lolcat \ + macchanger \ + nmap \ + pass \ + qemu-kvm \ + sakura \ + silversearcher-ag \ + software-properties-common \ + strace \ + sudo \ + texinfo \ + thunar \ + tig \ + tmate \ + tmux \ + tshark \ + unzip \ + whois \ + wireshark \ + vlc \ + xfonts-terminus \ + xfonts-terminus-dos \ + xfonts-terminus-oblique \ + zfs-initramfs \ + zfsutils-linux \ + zip + +RUN apt-key adv \ + --keyserver keyserver.ubuntu.com \ + --recv-keys 6A030B21BA07F4FB \ + && add-apt-repository \ + "deb http://apt.kubernetes.io/ kubernetes-xenial main" \ + && apt-get install -y \ + kubelet kubeadm kubectl + +RUN add-apt-repository \ + "deb http://packages.cloud.google.com/apt cloud-sdk-disco main" \ + && apt-get install -y \ + google-cloud-sdk + +RUN apt-key adv \ + --recv-keys 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 \ + && 
add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu cosmic stable" \ + && apt-get install -y \ + docker-ce + +RUN apt-key adv \ + --keyserver keyserver.ubuntu.com \ + --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E \ + && add-apt-repository \ + "deb http://ppa.launchpad.net/longsleep/golang-backports/ubuntu cosmic main" \ + && apt-get install -y \ + golang-1.12 \ + && echo 'export PATH=$PATH:$HOME/go/bin' \ + > /etc/profile.d/homedir-go-path.sh \ + && echo 'export PATH=/usr/lib/go-1.12/bin:$PATH' \ + > /etc/profile.d/system-go-path.sh + +RUN curl -L \ + https://storage.googleapis.com/kubernetes-helm/helm-v2.14.1-linux-amd64.tar.gz \ + | tar xvz -f - --strip-components 1 -C /usr/local/bin linux-amd64/helm linux-amd64/tiller + +RUN apt-key adv \ + --keyserver keyserver.ubuntu.com \ + --recv-keys 9FD3B784BC1C6FC31A8A0A1C1655A0AB68576280 \ + && add-apt-repository \ + "deb https://deb.nodesource.com/node_11.x disco main" \ + && apt-get install -y \ + nodejs + +RUN apt-key adv \ + --keyserver keyserver.ubuntu.com \ + --recv-keys EB4C1BFD4F042F6DDDCCEC917721F63BD38B4796 \ + && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" \ + > /etc/apt/sources.list.d/google-chrome.list \ + && apt-get update \ + && apt-get install -y \ + google-chrome-stable + +RUN apt-key adv \ + --keyserver keyserver.ubuntu.com \ + --recv-keys 59FDA1CE1B84B3FAD89366C027557F056DC33CA5 \ + && add-apt-repository \ + "deb http://ppa.launchpad.net/fish-shell/release-3/ubuntu disco main" \ + && apt-get install -y \ + fish + +RUN apt-get install -y ibus libgl1-mesa-glx \ + && apt-key adv \ + --keyserver keyserver.ubuntu.com \ + --recv-keys 396060CADD8A75220BFCB369B903BF1861A7C71D \ + && curl -L -o /tmp/zoom.deb \ + https://zoom.us/client/latest/zoom_amd64.deb \ + && dpkg -i /tmp/zoom.deb \ + && rm /tmp/zoom.deb + +RUN apt-get install -y emacs + +RUN apt-key adv \ + --keyserver keyserver.ubuntu.com \ + --recv-keys 
B9F8D658297AF3EFC18D5CDFA2F683C52980AECF \ + && add-apt-repository \ + "deb http://download.virtualbox.org/virtualbox/debian bionic contrib" \ + && apt-get install -y \ + virtualbox-6.0 + +RUN git clone --depth 1 --recurse-submodules \ + https://github.com/iimacs/site-lisp \ + /usr/local/share/emacs/site-lisp + +RUN emacs --batch -l /usr/local/share/emacs/site-lisp/default.el +RUN mv /root/.emacs.d /root/.spacemacs /etc/skel \ + && rm -f /etc/skel/.emacs.d/elpa/gnupg/S.gpg-agent* + +RUN cd /tmp \ + && wget https://github.com/jgm/pandoc/releases/download/2.7.3/pandoc-2.7.3-1-amd64.deb \ + && dpkg -i pandoc-2.7.3-1-amd64.deb \ + && rm pandoc-2.7.3-1-amd64.deb diff --git a/research/iiusb/zfs-usb.org b/research/iiusb/zfs-usb.org new file mode 100644 index 0000000..8e83e5f --- /dev/null +++ b/research/iiusb/zfs-usb.org @@ -0,0 +1,168 @@ +* Prerequisites +#+BEGIN_SRC tmate +apt-get install -y dosfstools +#+END_SRC + +* Format USB Partitions +** Create blank file +#+BEGIN_SRC tmate +truncate -s 7G usb-disk.img +#+END_SRC +** Setup file as loopback device +#+BEGIN_SRC tmate +LOOP_DEVICE=$(sudo losetup -fP --show usb-disk.img) +#+END_SRC + +#+BEGIN_SRC shell +sudo losetup -a +#+END_SRC +sudo curl https://download.docker.com/linux/raspbian/gpg | apt-key add - +#+RESULTS: +#+begin_EXAMPLE +/dev/loop0: [0045]:42668 (/zfs/home/hh/usb-disk.img) +#+end_EXAMPLE + +** Partition for ESP and ZFS +#+BEGIN_SRC tmate +sudo parted -s -a optimal $LOOP_DEVICE -- mklabel gpt \ + mkpart USBOOTME fat32 1 50MiB \ + mkpart USBZFS zfs 50MiB -1 \ + set 1 bios_grub on +#+END_SRC + +** Create zpool + https://wiki.archlinux.org/index.php/ZFS +#+BEGIN_SRC tmate + UUID=$(sudo lsblk --noheadings --output PARTUUID ${LOOP_DEVICE}p2) + POOLNAME=loopyloo + sudo zpool create \ + -o ashift=12 \ + -d \ + -o feature@async_destroy=enabled \ + -o feature@bookmarks=enabled \ + -o feature@embedded_data=enabled \ + -o feature@empty_bpobj=enabled \ + -o feature@enabled_txg=enabled \ + -o 
feature@extensible_dataset=enabled \ + -o feature@filesystem_limits=enabled \ + -o feature@hole_birth=enabled \ + -o feature@large_blocks=enabled \ + -o feature@lz4_compress=enabled \ + -o feature@spacemap_histogram=enabled \ + -o feature@userobj_accounting=enabled \ + $POOLNAME \ + /dev/disk/by-partuuid/$UUID + sudo zpool upgrade $POOLNAME + sudo zpool set autoexpand=on $POOLNAME + # parted DISK resizepart 2 -1 + # zpool online -e tank sdb +#+END_SRC + +** ZFS Tuning +#+BEGIN_SRC tmate +sudo zfs set atime=off $POOLNAME +sudo zfs set compression=on $POOLNAME +#+END_SRC + +#+BEGIN_SRC shell +# Possibly if you want tmp to sustain across reboots in ZFS +systemctl mask tmp.mount +sudo zfs create \ + -o setuid=off \ + -o devices=off \ + -o sync=disabled \ + -o mountpoint=legacy \ + $POOLNAME/tmp +#+END_SRC +** ZFS Ram limiting +#+NAME: reduce ram usage to 512MB kernel param +#+BEGIN_EXAMPLE +zfs.zfs_arc_max=536870912 +#+END_EXAMPLE + +* Bootstrap OS onto ZFS volume +** Create ZFS volumes for home and OS +#+BEGIN_SRC tmate +export POOLNAME=loopyloo +sudo zfs create $POOLNAME/usbhome +sudo zfs create $POOLNAME/usbdisco +#+END_SRC + +** debootstrap +#+BEGIN_SRC tmate + # nvidia-kernel-source,\ + # nvidia-driver-binary,\ + # nvidia-dkms-kernel,\ + # linux-modules-nvidia-390-lowlatency,\ + sudo debootstrap \ + --verbose \ + --components main,multiverse,restricted,universe \ + --arch amd64 \ + --merged-usr \ + --keep-debootstrap-dir \ + --include aptitude,\ + apt-file,\ + apt-transport-https,\ + aptitude,\ + build-essential,\ + curl,\ + emacs,\ + dbus-user-session,\ + jq,\ + silversearcher-ag,\ + grub-efi,\ + grub-efi-amd64-bin,\ + grub-pc-bin,\ + grub-ipxe,\ + grub-common,\ + nvidia-dkms-418,\ + xserver-xorg-video-nvidia-418,\ + nvidia-kernel-source-418,\ + nvidia-dkms-418,\ + nvidia-driver-418,\ + nvidia-utils-418,\ + linux-image-generic,\ + linux-headers-generic,\ + nvidia-modprobe,\ + nvidia-settings,\ + linux-firmware,\ + software-properties-common,\ + gnupg2,\ + 
strace,\ + tmux,\ + tmate,\ + unzip,\ + whois,\ + whowatch,\ + mailutils,\ + zfsutils-linux,\ + zfs-dkms,\ + zfs-initramfs \ + disco \ + bootstrap \ + http://ucmirror.canterbury.ac.nz/linux/ubuntu-updates \ + /usr/share/debootstrap/scripts/gutsy + #/$POOLNAME/usbdisco \ +#+END_SRC +** install grub +#+BEGIN_SRC tmate +mount SOMETHING /esp +# setup /etc/fstab +#+END_SRC +#+BEGIN_SRC tmate +grub-install --boot-directory=/boot --no-nvram --efi-directory=/esp --removable +#+END_SRC +#+BEGIN_SRC shell +mkdir -p etc/init +#+END_SRC + +* Test with QEMU +** EFI +** BIOS +* Test with VirtualBox +** EFI +** BIOS +* Test with Real Hardware +** EFI +** BIOS + diff --git a/research/k8s-infra-custom-service-acct.org b/research/k8s-infra-custom-service-acct.org new file mode 100644 index 0000000..2bba470 --- /dev/null +++ b/research/k8s-infra-custom-service-acct.org @@ -0,0 +1,65 @@ +#+TITLE: K8s Infra Custom Service Acct +Working on: +https://github.com/kubernetes/k8s.io/pull/2262#issuecomment-869854631 +Do not create service accounts in groups + +#+begin_src +Given correct permissions the ii-service account can access production logs + +To achieve this I need to: +ensure the role exists that has the needed permissions +ensure the service account exists +bind the service account to the role +#+end_src + +Find the permissions needed for the role: +https://github.com/kubernetes/k8s.io/blob/2cfeed5bacb9f82b3aff1722326e3bec5b5528fc/audit/projects/k8s-artifacts-prod/buckets/k8s-artifacts-gcslogs/iam.json +shows me +the group: k8s-infra-gcs-access-logs@kubernetes.io +is assigned to the role: "roles/storage.objectViewer" +Available GCS roles +https://cloud.google.com/storage/docs/access-control/iam-roles +This confirms roles/storage.objectViewer is the most appropriate role +It contains: +- resourcemanager.projects.get +- resourcemanager.projects.list +- storage.objects.get +- storage.objects.list + +Create a custom roles +https://cloud.google.com/iam/docs/understanding-custom-roles + 
+TODO: Create a custom role +https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google_project_iam_custom_role +#+begin_src :tf export to infra/gcp/clusters/projects/k8s-infra-ii-sandbox/roles.tf +resource "google_project_iam_custom_role" "my-custom-role" { + role_id = "myCustomRole" + title = "My Custom Role" + description = "A description" + permissions = ["iam.roles.list", "iam.roles.create", "iam.roles.delete"] +} +#+end_src + +TODO: Create a service account +https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google_service_account#example-usage +#+begin_src :tf +resource "google_service_account" "service_account" { + account_id = "service-account-id" + display_name = "service Account" + description = "service-account-description" + project = "service-account-project" +} +#+end_src + +TODO: Bind service account to role +https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google_project_iam#google_project_iam_binding +#+begin_src :tf +resource "google_project_iam_binding" "project" { + project = "your-project-id" + role = "roles/editor" + + members = [ + "user:jane@example.com", + ] +} +#+end_src diff --git a/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-data-processor.sh b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-data-processor.sh new file mode 100644 index 0000000..fd37310 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-data-processor.sh @@ -0,0 +1,38 @@ + + +# Formatting the data + +#!/bin/bash + +SKIP_TO=$1 +READ_FROM=asns.txt +WRITE_TO=asn-data.csv + +TMPDIR=$(mktemp -d) +echo "Temp folder: $TMPDIR" + +ALLOWED_RETRIES=5 + +count=0 +while IFS= read -r asn; do + count=$((count+=1)) + retries=0 + echo "ASN[$count]: $asn" + if [ $asn -eq 0 ] || ( [ ! 
-z $SKIP_TO ] && [ $count -lt $SKIP_TO ] ); then + echo "Skipping [$count] $asn" + continue + fi + until curl "https://api.bgpview.io/asn/$asn" 2> /dev/null > $TMPDIR/$asn.json && cat $TMPDIR/$asn.json | jq .data.name 2>&1 > /dev/null; do + retries=$((retries+=1)) + if [ $retries -eq $ALLOWED_RETRIES ]; then + echo "Skipping [$count] $asn" + retries=0 + continue 2 + fi + echo "Failed [$retries/$ALLOWED_RETRIES]. Retrying '$asn' in 3 seconds" + sleep 3s + done + cat $TMPDIR/$asn.json | jq -r '.data | (.email_contacts | join(";")) as $contacts | .description_short as $name | [.asn, $name, $contacts] | @csv' 2> /dev/null \ + | tee -a $WRITE_TO 2>&1 > /dev/null + sleep 1s +done < $READ_FROM diff --git a/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-db-data-processor.go b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-db-data-processor.go new file mode 100644 index 0000000..adf4a06 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-db-data-processor.go @@ -0,0 +1,23 @@ + + +// Selecting data + +package main + + +import ( + "fmt" + "os" + "database/sql" + _ "github.com/lib/pq" +) + +func main() { + db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres:password@%v/peeringdb", os.Getenv("SHARINGIO_PAIR_LOAD_BALANCER_IP"))) + if err != nil { + fmt.Println(err.Error()) + return + } + db.Ping() + fmt.Println("Hello") +} diff --git a/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-to-company-peeringdb-data.org b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-to-company-peeringdb-data.org new file mode 100644 index 0000000..41a8a0c --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/asn-to-company-peeringdb-data.org @@ -0,0 +1,1497 @@ +#+TITLE: K8s Reg Asn Magic +#+PROPERTY: header-args:sql-mode+ :eval never-export :exports both 
:session none + +Login to gcloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC + +Set the project +#+BEGIN_SRC tmate :window prepare +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC + +* Parse from API +Save the data to a bucket +#+BEGIN_SRC tmate :window prepare +bq extract --destination_format NEWLINE_DELIMITED_JSON k8s_artifacts_gcslogs_appspot.riaan_ipv4_asn_ip_name_over_2500 gs://ii_bq_scratch_dump/ip-and-asn.json +#+END_SRC + +Download the data from the bucket +#+BEGIN_SRC tmate :window prepare +mkdir ~/ip_and_asn_jason +cd ~/ip_and_asn_jason +gsutil cp gs://ii_bq_scratch_dump/ip-and-asn.json ip-and-asn.json +#+END_SRC + +Store only the ASN +#+BEGIN_SRC tmate :window prepare +cat ip-and-asn.json | jq -r '.asn' | sort | uniq > asns.txt +#+END_SRC + +Formatting the data +#+BEGIN_SRC shell :tangle ./asn-data-processor.sh :results silent +#!/bin/bash + +SKIP_TO=$1 +READ_FROM=asns.txt +WRITE_TO=asn-data.csv + +TMPDIR=$(mktemp -d) +echo "Temp folder: $TMPDIR" + +ALLOWED_RETRIES=5 + +count=0 +while IFS= read -r asn; do + count=$((count+=1)) + retries=0 + echo "ASN[$count]: $asn" + if [ $asn -eq 0 ] || ( [ ! -z $SKIP_TO ] && [ $count -lt $SKIP_TO ] ); then + echo "Skipping [$count] $asn" + continue + fi + until curl "https://api.bgpview.io/asn/$asn" 2> /dev/null > $TMPDIR/$asn.json && cat $TMPDIR/$asn.json | jq .data.name 2>&1 > /dev/null; do + retries=$((retries+=1)) + if [ $retries -eq $ALLOWED_RETRIES ]; then + echo "Skipping [$count] $asn" + retries=0 + continue 2 + fi + echo "Failed [$retries/$ALLOWED_RETRIES]. 
Retrying '$asn' in 3 seconds" + sleep 3s + done + cat $TMPDIR/$asn.json | jq -r '.data | (.email_contacts | join(";")) as $contacts | .description_short as $name | [.asn, $name, $contacts] | @csv' 2> /dev/null \ + | tee -a $WRITE_TO 2>&1 > /dev/null + sleep 1s +done < $READ_FROM +#+END_SRC + +Run the script +#+BEGIN_SRC tmate :window prepare +chmod +x ./asn-data-processor.sh +time ./asn-data-processor.sh +#+END_SRC + +Upload to the bucket +#+BEGIN_SRC shell :results silent +gsutil cp ./asn-data.csv gs://ii_bq_scratch_dump/asn-data.csv +gsutil ls $_ +#+END_SRC + +Load into big query +#+BEGIN_SRC shell :results silent +bq load --autodetect --source_format=CSV k8s_artifacts_gcslogs_appspot.asn_company_lookup gs://ii_bq_scratch_dump/asn-data.csv +#+END_SRC + +* Parse from Postgres + +Bring up Postgres + +#+BEGIN_SRC tmate :window postgres +docker run -it --rm -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_DB=peeringdb postgres:12.2-alpine +#+END_SRC + +Clone https://git.2e8.dk/peeringdb-simplesync + +#+BEGIN_SRC tmate :window prepare :dir (getenv "HOME") +git clone https://git.2e8.dk/peeringdb-simplesync +cd peeringdb-simplesync +#+END_SRC + +Enter PeeringDB creds + +#+BEGIN_SRC tmate :window prepare :dir (concat (getenv "HOME") "/peeringdb-simplesync") +read -p 'PEERINGDB_USER : ' PEERINGDB_USER +read -p 'PEERINGDB_PASSWORD: ' PEERINGDB_PASSWORD +#+END_SRC + +Write the config for sync.py + +#+BEGIN_SRC python :tangle (concat (getenv "HOME") "/peeringdb-simplesync/config.py") +from requests.auth import HTTPBasicAuth +import os + +host=os.environ['SHARINGIO_PAIR_LOAD_BALANCER_IP'] +user=os.environ['PEERINGDB_USER'] +password=os.environ['PEERINGDB_PASSWORD'] + +def get_config(): + return { + 'db_conn_str': 'dbname=peeringdb host=%s user=postgres password=password' % host, + 'db_schema': 'peeringdb', + 'auth': HTTPBasicAuth('%s' % user, '%s' % password), + } +#+END_SRC + +Dump all of the data + +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") 
"/peeringdb-simplesync") +./sync +#+END_SRC + +Set env vars to not prompt for Postgres username and password + +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +export \ + PGUSER=postgres \ + PGPASSWORD=password +#+END_SRC + +** Create a new dump +Dump the database +#+BEGIN_SRC tmate :window peeringdb-sync :dir (concat (getenv "HOME") "/peeringdb-simplesync") +pg_dump -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP > peeringdb-dump-$(date +%Y%m%d).sql +#+END_SRC + +Upload the dump +#+BEGIN_SRC tmate :window peeringdb-sync +gsutil cp peeringdb-dump-$(date +%Y%m%d).sql gs://ii_bq_scratch_dump/peeringdb-dump-$(date +%Y%m%d).sql +#+END_SRC + +** With pre-prepared dump + +Download from the bucket +#+BEGIN_SRC tmate :window peeringdb-sync +gsutil cp gs://ii_bq_scratch_dump/peeringdb-dump-20210512.sql ./peeringdb-dump-20210512.sql +#+END_SRC + +Load the data from the dump into a new/separate Postgres instance +#+BEGIN_SRC tmate :window peeringdb-sync +psql -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP < ./peeringdb-dump-20210512.sql +#+END_SRC + +** Explore + +Connect with psql +#+BEGIN_SRC tmate :window peeringdb-sync +psql -U postgres -d peeringdb -h $SHARINGIO_PAIR_LOAD_BALANCER_IP +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | name | asn | website +----+----------------------+-----+--------- + 46 | XS4ALL Internet B.V. | | + 17 | DALnet IRC Network | | + 90 | Plushosting B.V. 
| | + 91 | YellowBrix | | + 92 | NYCX | | +(5 rows) + +#+end_SRC + +See the tables +#+BEGIN_SRC sql-mode :eval never-export :exports both :session none :sql-user postgres :sql-database peeringdb :sql-server (getenv "SHARINGIO_PAIR_LOAD_BALANCER_IP") :sql-password password +SELECT schemaname, tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + schemaname | tablename +------------+----------- + peeringdb | fac + peeringdb | ix + peeringdb | ixfac + peeringdb | ixlan + peeringdb | ixpfx + peeringdb | net + peeringdb | netfac + peeringdb | netixlan + peeringdb | org + peeringdb | poc +(10 rows) + +#+end_SRC + +Find data from peeringdb.org table +#+BEGIN_SRC sql-mode +select id, data::jsonb ->> 'name' as name, data::jsonb ->> 'asn' as asn, data::jsonb ->> 'website' as "website" from peeringdb.org where 'website' is not null limit 5; +#+END_SRC +#+BEGIN_SRC sql-mode +\d+ +#+END_SRC + +#+RESULTS: +#+begin_SRC example + List of relations + Schema | Name | Type | Owner | Size | Description +-----------+----------+-------+----------+---------+------------- + peeringdb | fac | table | postgres | 3888 kB | + peeringdb | ix | table | postgres | 1288 kB | + peeringdb | ixfac | table | postgres | 960 kB | + peeringdb | ixlan | table | postgres | 624 kB | + peeringdb | ixpfx | table | postgres | 640 kB | + peeringdb | net | table | postgres | 22 MB | + peeringdb | netfac | table | postgres | 15 MB | + peeringdb | netixlan | table | postgres | 25 MB | + peeringdb | org | table | postgres | 10 MB | + peeringdb | poc | table | postgres | 3536 kB | +(10 rows) + +#+end_SRC + +#+BEGIN_SRC sql-mode +\d+ fac +#+END_SRC + +#+RESULTS: +#+begin_SRC example + Table "peeringdb.fac" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +---------+--------------------------+-----------+----------+---------+----------+--------------+------------- + id | 
integer | | not null | | plain | | + org_id | integer | | not null | | plain | | + status | text | | not null | | extended | | + data | jsonb | | not null | | extended | | + created | timestamp with time zone | | not null | | plain | | + updated | timestamp with time zone | | not null | | plain | | + deleted | timestamp with time zone | | | | plain | | +Indexes: + "fac_pkey" PRIMARY KEY, btree (id) +Access method: heap + +#+end_SRC + +#+begin_src sql-mode +\d peeringdb. +#+end_src + +#+RESULTS: +#+begin_SRC example + Table "peeringdb.fac" + Column | Type | Collation | Nullable | Default +---------+--------------------------+-----------+----------+--------- + id | integer | | not null | + org_id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "fac_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.fac_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.fac" + + Table "peeringdb.ix" + Column | Type | Collation | Nullable | Default +---------+--------------------------+-----------+----------+--------- + id | integer | | not null | + org_id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "ix_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.ix_pkey" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.ix" + + Table "peeringdb.ixfac" + Column | Type | Collation | Nullable | Default +---------+--------------------------+-----------+----------+--------- + id | integer | | not null | + ix_id | integer | | not null | + fac_id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "ixfac_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.ixfac_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.ixfac" + + Table "peeringdb.ixlan" + Column | Type | Collation | Nullable | Default +---------+--------------------------+-----------+----------+--------- + id | integer | | not null | + ix_id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "ixlan_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.ixlan_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.ixlan" + + Table "peeringdb.ixpfx" + Column | Type | Collation | Nullable | Default +----------+--------------------------+-----------+----------+--------- + id | integer | | not null | + ixlan_id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "ixpfx_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.ixpfx_pkey" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.ixpfx" + + Table "peeringdb.net" + Column | Type | Collation | Nullable | Default +---------+--------------------------+-----------+----------+--------- + id | integer | | not null | + org_id | integer | | not null | + asn | bigint | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "net_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.net_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.net" + + Table "peeringdb.netfac" + Column | Type | Collation | Nullable | Default +---------+--------------------------+-----------+----------+--------- + id | integer | | not null | + net_id | integer | | not null | + fac_id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "netfac_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.netfac_pkey" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.netfac" + + Table "peeringdb.netixlan" + Column | Type | Collation | Nullable | Default +----------+--------------------------+-----------+----------+--------- + id | integer | | not null | + net_id | integer | | not null | + ix_id | integer | | not null | + ixlan_id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "netixlan_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.netixlan_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.netixlan" + + Table "peeringdb.org" + Column | Type | Collation | Nullable | Default +---------+--------------------------+-----------+----------+--------- + id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "org_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.org_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.org" + + Table "peeringdb.poc" + Column | Type | Collation | Nullable | Default +---------+--------------------------+-----------+----------+--------- + id | integer | | not null | + net_id | integer | | not null | + status | text | | not null | + data | jsonb | | not null | + created | timestamp with time zone | | not null | + updated | timestamp with time zone | | not null | + deleted | timestamp with time zone | | | +Indexes: + "poc_pkey" PRIMARY KEY, btree (id) + + Index "peeringdb.poc_pkey" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "peeringdb.poc" + +#+end_SRC + +Find data from peeringdb.net table +#+BEGIN_SRC sql-mode +select id, data::jsonb ->> 'name' as name, data::jsonb ->> 'asn' as asn, data::jsonb ->> 'website' as "website" from peeringdb.net limit 5; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | name | asn | website +----+----------------------+-------+-------------------------------- + 83 | Cable&Wireless UK | 5388 | http://www.cw.com/uk + 24 | DSLExtreme | 19817 | http://www.dslextreme.com + 28 | New Edge Networks | 19029 | http://www.newedgenetworks.com + 97 | Netservices Plc | 15444 | http://www.netservicesplc.com + 36 | GrafiX Internet B.V. | 16131 | http://www.grafix.nl/ +(5 rows) + +#+end_SRC + +Getting fields with emails +#+BEGIN_SRC sql-mode +select id, data::jsonb ->> 'name' as name, data::jsonb ->> 'email' as email, net_id from peeringdb.poc where status = 'ok' limit 5; +#+END_SRC + +Connect ASNs with emails by joining names between tables +#+BEGIN_SRC sql-mode +select net.id, + (net.data ->> 'name') as "name", + (net.data ->> 'asn') as "asn", + (net.data ->> 'website') as website, + (poc.data ->> 'email') as email + from peeringdb.net net + left join peeringdb.poc on ((peeringdb.poc.data ->> 'name') = net.data ->> 'name') + where (net.data ->>'website') is not null + order by email asc + limit 5; +#+END_SRC + +#+BEGIN_SRC sql-mode +\d peeringdb.net +#+END_SRC +** schema exploration: +*** peeringdb.ixpfx -- has cidr, but only 2.5k +MAIN issue? 
this table only has 2500 rows, what we found in ip2asn is over 400k +#+BEGIN_SRC sql-mode +select * from peeringdb.ixpfx limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | ixlan_id | status | data | created | updated | deleted +----+----------+---------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+------------------------ + 1 | 1 | deleted | {"id": 1, "in_dfz": true, "prefix": "206.223.115.0/24", "status": "deleted", "created": "2010-07-29T00:00:00Z", "updated": "2020-08-26T05:23:06Z", "ixlan_id": 1, "protocol": "IPv4"} | 2010-07-29 00:00:00+00 | 2020-08-26 05:23:06+00 | 2020-08-26 05:23:06+00 +(1 row) + +#+end_SRC + + + +#+BEGIN_SRC sql-mode +select id, ixlan_id, status, data::jsonb ->> 'name' as name, data::jsonb ->> 'prefix' as prefix from peeringdb.ixpfx limit 5; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | ixlan_id | status | name | prefix +----+----------+---------+------+------------------- + 1 | 1 | deleted | | 206.223.115.0/24 + 2 | 1 | ok | | 2001:504:0:2::/64 + 3 | 2 | ok | | 208.115.136.0/23 + 4 | 2 | ok | | 2001:504:0:4::/64 + 5 | 3 | ok | | 206.223.118.0/23 +(5 rows) + +#+end_SRC + + +#+BEGIN_SRC sql-mode +select count(data) from peeringdb.ixpfx; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 2275 +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.ixpfx limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +---------------------------------------- + { + + "id": 1, + + "in_dfz": true, + + "prefix": "206.223.115.0/24", + + "status": "deleted", + + "created": "2010-07-29T00:00:00Z",+ + "updated": "2020-08-26T05:23:06Z",+ + "ixlan_id": 1, + + "protocol": "IPv4" + + } + { + + "id": 2, + + "in_dfz": true, + + "prefix": "2001:504:0:2::/64", + + "status": "ok", + + "created": 
"2010-07-29T00:00:00Z",+ + "updated": "2020-08-26T05:23:08Z",+ + "ixlan_id": 1, + + "protocol": "IPv6" + + } +(2 rows) + +#+end_SRC + +*** peeringdb.fac + +#+BEGIN_SRC sql-mode +select * from peeringdb.fac limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +----+--------+---------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+------------------------ + 3 | 7 | deleted | {"id": 3, "aka": "", "city": "New York", "clli": "NYCMNY", "name": "Telehouse New York Broadway", "floor": "", "notes": "", "state": "NY", "suite": "", "npanxx": "212-785", "org_id": 7, "status": "deleted", "country": "US", "created": "2010-07-29T00:00:00Z", "rencode": "", "updated": "2016-11-01T04:16:24Z", "website": "http://www.telehouse.net", "zipcode": "10004-1010", "address1": "25 Broadway", "address2": "", "latitude": null, "org_name": "Telehouse - Global Data Centers", "longitude": null, "name_long": "", "net_count": 0, "tech_email": "", "tech_phone": "", "sales_email": "", "sales_phone": ""} | 2010-07-29 00:00:00+00 | 2016-11-01 04:16:24+00 | 2016-11-01 04:16:24+00 +(1 row) + +#+end_SRC +No sign of ip ranges, gonna try the next one + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.fac limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +----------------------------------------------------- + { + + "id": 3, + + "aka": "", + + "city": "New York", + + 
"clli": "NYCMNY", + + "name": "Telehouse New York Broadway", + + "floor": "", + + "notes": "", + + "state": "NY", + + "suite": "", + + "npanxx": "212-785", + + "org_id": 7, + + "status": "deleted", + + "country": "US", + + "created": "2010-07-29T00:00:00Z", + + "rencode": "", + + "updated": "2016-11-01T04:16:24Z", + + "website": "http://www.telehouse.net", + + "zipcode": "10004-1010", + + "address1": "25 Broadway", + + "address2": "", + + "latitude": null, + + "org_name": "Telehouse - Global Data Centers", + + "longitude": null, + + "name_long": "", + + "net_count": 0, + + "tech_email": "", + + "tech_phone": "", + + "sales_email": "", + + "sales_phone": "" + + } + { + + "id": 42, + + "aka": "", + + "city": "London", + + "clli": "LONDEN", + + "name": "Equinix London Docklands_ (LD8)", + + "floor": "", + + "notes": "", + + "state": "", + + "suite": "", + + "npanxx": "", + + "org_id": 2, + + "status": "deleted", + + "country": "GB", + + "created": "2010-07-29T00:00:00Z", + + "rencode": "", + + "updated": "2017-01-22T17:23:59Z", + + "website": "http://www.equinix.com/locations/",+ + "zipcode": "E14 9GE", + + "address1": "6-9 Harbour Exchange Square", + + "address2": "", + + "latitude": null, + + "org_name": "Equinix, Inc.", + + "longitude": null, + + "name_long": "", + + "net_count": 0, + + "tech_email": "", + + "tech_phone": "", + + "sales_email": "", + + "sales_phone": "" + + } +(2 rows) + +#+end_SRC + +*** peeringdb.ix +#+BEGIN_SRC sql-mode +select * from peeringdb.ix limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | org_id | status | data | created | updated | deleted 
+----+--------+---------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+------------------------ + 36 | 85 | deleted | {"id": 36, "aka": "", "city": "Paris", "name": "FreeIX", "media": "Ethernet", "notes": "", "org_id": 85, "status": "deleted", "country": "FR", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-14T20:42:55Z", "website": "http://www.freeix.net/", "name_long": "Free Internet eXchange", "net_count": 0, "url_stats": "http://www.freeix.net/mrtg/", "proto_ipv6": false, "tech_email": "", "tech_phone": "", "policy_email": "", "policy_phone": "", "ixf_net_count": 0, "proto_unicast": true, "ixf_last_import": null, "proto_multicast": false, "region_continent": "Europe"} | 2010-07-29 00:00:00+00 | 2016-03-14 20:42:55+00 | 2016-03-14 20:42:55+00 +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.ix limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +------------------------------------------------------ + { + + "id": 36, + + "aka": "", + + "city": "Paris", + + "name": "FreeIX", + + "media": "Ethernet", + + "notes": "", + + "org_id": 85, + + "status": "deleted", + + "country": "FR", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2016-03-14T20:42:55Z", + + "website": "http://www.freeix.net/", + + "name_long": "Free Internet eXchange", + + "net_count": 0, + + "url_stats": "http://www.freeix.net/mrtg/", + + "proto_ipv6": false, + + 
"tech_email": "", + + "tech_phone": "", + + "policy_email": "", + + "policy_phone": "", + + "ixf_net_count": 0, + + "proto_unicast": true, + + "ixf_last_import": null, + + "proto_multicast": false, + + "region_continent": "Europe" + + } + { + + "id": 19, + + "aka": "", + + "city": "Chicago", + + "name": "AADS", + + "media": "ATM", + + "notes": "", + + "org_id": 48, + + "status": "deleted", + + "country": "US", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2016-03-14T21:08:05Z", + + "website": "", + + "name_long": "Ameritech Advanced Data Services",+ + "net_count": 0, + + "url_stats": "", + + "proto_ipv6": false, + + "tech_email": "", + + "tech_phone": "", + + "policy_email": "", + + "policy_phone": "", + + "ixf_net_count": 0, + + "proto_unicast": true, + + "ixf_last_import": null, + + "proto_multicast": false, + + "region_continent": "North America" + + } +(2 rows) + +#+end_SRC + +*** peeringdb.ixfac + +#+BEGIN_SRC sql-mode +select * from peeringdb.ixfac limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | ix_id | fac_id | status | data | created | updated | deleted +----+-------+--------+--------+------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 72 | 48 | 164 | ok | {"id": 72, "ix_id": 48, "fac_id": 164, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-11T07:21:43Z"} | 2010-07-29 00:00:00+00 | 2016-03-11 07:21:43+00 | +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.ixfac limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +---------------------------------------- + { + + "id": 72, + + "ix_id": 48, + + "fac_id": 164, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z",+ + "updated": "2016-03-11T07:21:43Z" + + } + { + + "id": 73, + + "ix_id": 48, + + "fac_id": 177, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z",+ + 
"updated": "2016-03-11T07:21:43Z" + + } +(2 rows) + +#+end_SRC + +*** peeringdb.ixlan + +#+BEGIN_SRC sql-mode +select * from peeringdb.ixlan limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | ix_id | status | data | created | updated | deleted +----+-------+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 41 | 41 | ok | {"id": 41, "mtu": null, "name": "", "descr": "", "ix_id": 41, "rs_asn": 0, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-11T07:21:58Z", "arp_sponge": null, "dot1q_support": false, "ixf_ixp_member_list_url_visible": "Private"} | 2010-07-29 00:00:00+00 | 2016-03-11 07:21:58+00 | +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.ixlan limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +-------------------------------------------------- + { + + "id": 41, + + "mtu": null, + + "name": "", + + "descr": "", + + "ix_id": 41, + + "rs_asn": 0, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2016-03-11T07:21:58Z", + + "arp_sponge": null, + + "dot1q_support": false, + + "ixf_ixp_member_list_url_visible": "Private"+ + } + { + + "id": 43, + + "mtu": null, + + "name": "", + + "descr": "", + + "ix_id": 43, + + "rs_asn": 0, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2016-03-11T07:21:58Z", + + "arp_sponge": null, + + "dot1q_support": false, + + "ixf_ixp_member_list_url_visible": "Private"+ + } +(2 rows) + +#+end_SRC + + + +*** peeringdb.ixpfx + +#+BEGIN_SRC sql-mode +select * from peeringdb.ixpfx limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | ixlan_id | status | data | created | updated | deleted 
+----+----------+---------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+------------------------ + 1 | 1 | deleted | {"id": 1, "in_dfz": true, "prefix": "206.223.115.0/24", "status": "deleted", "created": "2010-07-29T00:00:00Z", "updated": "2020-08-26T05:23:06Z", "ixlan_id": 1, "protocol": "IPv4"} | 2010-07-29 00:00:00+00 | 2020-08-26 05:23:06+00 | 2020-08-26 05:23:06+00 +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.ixpfx limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +---------------------------------------- + { + + "id": 1, + + "in_dfz": true, + + "prefix": "206.223.115.0/24", + + "status": "deleted", + + "created": "2010-07-29T00:00:00Z",+ + "updated": "2020-08-26T05:23:06Z",+ + "ixlan_id": 1, + + "protocol": "IPv4" + + } + { + + "id": 2, + + "in_dfz": true, + + "prefix": "2001:504:0:2::/64", + + "status": "ok", + + "created": "2010-07-29T00:00:00Z",+ + "updated": "2020-08-26T05:23:08Z",+ + "ixlan_id": 1, + + "protocol": "IPv6" + + } +(2 rows) + +#+end_SRC + +*** peeringdb.net + +#+BEGIN_SRC sql-mode +select * from peeringdb.net limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | org_id | asn | status | data | created | updated | deleted 
+----+--------+------+--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 83 | 3152 | 5388 | ok | {"id": 83, "aka": "", "asn": 5388, "name": "Cable&Wireless UK", "notes": "This is former Energis Communications UK backbone network (AS5388) which is now owned by Cable and Wireless.\r\n\r\nAS5388 have no direct peering relations any longer, for peering request please contact our backbone AS1273 peering team.\r\n\r\nCable and Wireless global backbone network (AS1273) has a separate PeeringDB entry.\r\n", "org_id": 3152, "status": "ok", "created": "2004-08-03T10:30:54Z", "updated": "2016-03-14T20:23:33Z", "website": "http://www.cw.com/uk", "info_ipv6": false, "info_type": "NSP", "name_long": "", "info_ratio": "Balanced", "info_scope": "Regional", "irr_as_set": "AS-ENERGIS", 
"policy_url": "", "poc_updated": "2020-01-22T04:24:08Z", "info_traffic": "10-20Gbps", "info_unicast": true, "policy_ratio": false, "route_server": "", "looking_glass": "http://as5388.net/cgi-bin/lg.pl", "info_multicast": false, "info_prefixes4": 30, "info_prefixes6": 2, "netfac_updated": "2016-03-14T21:24:34Z", "policy_general": "Restrictive", "allow_ixp_update": false, "netixlan_updated": null, "policy_contracts": "Not Required", "policy_locations": "Not Required", "info_never_via_route_servers": false} | 2004-08-03 10:30:54+00 | 2016-03-14 20:23:33+00 | +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.net limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + { + + "id": 83, + + "aka": "", + + "asn": 5388, + + "name": "Cable&Wireless UK", + + "notes": "This is former Energis Communications UK backbone network (AS5388) which is now owned by Cable and Wireless.\r\n\r\nAS5388 have no direct peering relations any longer, for peering request please contact our backbone AS1273 peering team.\r\n\r\nCable and Wireless global backbone network (AS1273) has a separate PeeringDB entry.\r\n",+ + "org_id": 3152, + + "status": "ok", + + "created": "2004-08-03T10:30:54Z", + + "updated": "2016-03-14T20:23:33Z", + + "website": "http://www.cw.com/uk", + + "info_ipv6": false, + + "info_type": "NSP", + + "name_long": "", + + "info_ratio": "Balanced", + + "info_scope": "Regional", + + "irr_as_set": "AS-ENERGIS", + + "policy_url": "", + + "poc_updated": "2020-01-22T04:24:08Z", + + "info_traffic": "10-20Gbps", + + "info_unicast": true, + + "policy_ratio": false, + + "route_server": "", + 
+ "looking_glass": "http://as5388.net/cgi-bin/lg.pl", + + "info_multicast": false, + + "info_prefixes4": 30, + + "info_prefixes6": 2, + + "netfac_updated": "2016-03-14T21:24:34Z", + + "policy_general": "Restrictive", + + "allow_ixp_update": false, + + "netixlan_updated": null, + + "policy_contracts": "Not Required", + + "policy_locations": "Not Required", + + "info_never_via_route_servers": false + + } + { + + "id": 24, + + "aka": "Extreme Telecom", + + "asn": 19817, + + "name": "DSLExtreme", + + "notes": "", + + "org_id": 62, + + "status": "ok", + + "created": "2004-07-28T00:00:00Z", + + "updated": "2016-03-14T20:47:30Z", + + "website": "http://www.dslextreme.com", + + "info_ipv6": false, + + "info_type": "Cable/DSL/ISP", + + "name_long": "", + + "info_ratio": "Mostly Inbound", + + "info_scope": "Regional", + + "irr_as_set": "", + + "policy_url": "", + + "poc_updated": "2016-03-14T21:35:12Z", + + "info_traffic": "1-5Gbps", + + "info_unicast": true, + + "policy_ratio": false, + + "route_server": "", + + "looking_glass": "", + + "info_multicast": false, + + "info_prefixes4": 69, + + "info_prefixes6": 3, + + "netfac_updated": "2016-03-14T20:33:54Z", + + "policy_general": "Open", + + "allow_ixp_update": false, + + "netixlan_updated": "2021-05-12T00:13:00.764215Z", + + "policy_contracts": "Not Required", + + "policy_locations": "Not Required", + + "info_never_via_route_servers": false + + } +(2 rows) + +#+end_SRC + +*** peeringdb.netixlan + +#+BEGIN_SRC sql-mode +select * from peeringdb.netixlan limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | net_id | ix_id | ixlan_id | status | data | created | updated | deleted 
+----+--------+-------+----------+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 81 | 3 | 64 | 64 | ok | {"id": 81, "asn": 31800, "name": "NL-ix: Main", "ix_id": 64, "notes": "", "speed": 1000, "net_id": 3, "status": "ok", "created": "2010-07-29T00:00:00Z", "ipaddr4": "193.239.116.162", "ipaddr6": null, "updated": "2016-03-14T21:02:11Z", "ixlan_id": 64, "is_rs_peer": false, "operational": true} | 2010-07-29 00:00:00+00 | 2016-03-14 21:02:11+00 | +(1 row) +#+end_SRC +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.netixlan limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +---------------------------------------- + { + + "id": 81, + + "asn": 31800, + + "name": "NL-ix: Main", + + "ix_id": 64, + + "notes": "", + + "speed": 1000, + + "net_id": 3, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z",+ + "ipaddr4": "193.239.116.162", + + "ipaddr6": null, + + "updated": "2016-03-14T21:02:11Z",+ + "ixlan_id": 64, + + "is_rs_peer": false, + + "operational": true + + } + { + + "id": 84, + + "asn": 31800, + + "name": "Equinix Dallas", + + "ix_id": 3, + + "notes": "", + + "speed": 1000, + + "net_id": 3, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z",+ + "ipaddr4": "206.223.118.88", + + "ipaddr6": null, + + "updated": "2016-03-14T21:02:11Z",+ + "ixlan_id": 3, + + "is_rs_peer": false, + + "operational": true + + } +(2 rows) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select count(data) from peeringdb.netixlan limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 55319 +(1 row) + +#+end_SRC + +*** peeringdb.org + +#+BEGIN_SRC sql-mode +select * from peeringdb.org limit 1; +#+END_SRC + +#+RESULTS: 
+#+begin_SRC example + id | status | data | created | updated | deleted +----+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 46 | ok | {"id": 46, "aka": "", "city": "", "name": "XS4ALL Internet B.V.", "floor": "", "notes": "", "state": "", "suite": "", "status": "ok", "country": "", "created": "2004-07-28T00:00:00Z", "updated": "2016-03-14T20:23:26Z", "website": "", "zipcode": "", "address1": "", "address2": "", "latitude": null, "longitude": null, "name_long": ""} | 2004-07-28 00:00:00+00 | 2016-03-14 20:23:26+00 | +(1 row) + +#+end_SRC +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.org limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + jsonb_pretty +---------------------------------------- + { + + "id": 46, + + "aka": "", + + "city": "", + + "name": "XS4ALL Internet B.V.", + + "floor": "", + + "notes": "", + + "state": "", + + "suite": "", + + "status": "ok", + + "country": "", + + "created": "2004-07-28T00:00:00Z",+ + "updated": "2016-03-14T20:23:26Z",+ + "website": "", + + "zipcode": "", + + "address1": "", + + "address2": "", + + "latitude": null, + + "longitude": null, + + "name_long": "" + + } + { + + "id": 17, + + "aka": "", + + "city": "", + + "name": "DALnet IRC Network", + + "floor": "", + + "notes": "", + + "state": "", + + "suite": "", + + "status": "ok", + + "country": "", + + "created": "2004-07-28T00:00:00Z",+ + "updated": "2016-03-14T20:27:47Z",+ + "website": "", + + "zipcode": "", + + "address1": "", + + "address2": "", + + "latitude": null, + + "longitude": null, + + "name_long": "" + + } +(2 rows) + +#+end_SRC + +*** peeringdb.poc +#+BEGIN_SRC sql-mode +select * from 
peeringdb.poc limit 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + id | net_id | status | data | created | updated | deleted +-----+--------+--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+------------------------+--------- + 100 | 115 | ok | {"id": 100, "url": "", "name": "Telefonica DE Peering Team", "role": "Policy", "email": "peering.de@telefonica.com", "phone": "", "net_id": 115, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-05-20T13:55:47Z", "visible": "Public"} | 2010-07-29 00:00:00+00 | 2016-05-20 13:55:47+00 | +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select jsonb_pretty(data) from peeringdb.poc limit 2; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + { + + "id": 100, + + "url": "", + + "name": "Telefonica DE Peering Team",+ + "role": "Policy", + + "email": "peering.de@telefonica.com",+ + "phone": "", + + "net_id": 115, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2016-05-20T13:55:47Z", + + "visible": "Public" + + } + { + + "id": 48, + + "url": "", + + "name": "NOC", + + "role": "NOC", + + "email": "noc@stealth.net", + + "phone": "+12122322020", + + "net_id": 26, + + "status": "ok", + + "created": "2010-07-29T00:00:00Z", + + "updated": "2020-05-20T23:14:22Z", + + "visible": "Public" + + } + +#+end_SRC + + +** Post process org blocks +#+NAME: json-res +#+BEGIN_SRC sql-mode :var json-r="" +select data from peeringdb.ixlan limit 1; +#+END_SRC + +#+RESULTS: json-res +#+begin_SRC example + {"id": 41, "mtu": null, "name": "", "descr": "", "ix_id": 41, "rs_asn": 0, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-11T07:21:58Z", "arp_sponge": null, "dot1q_support": false, "ixf_ixp_member_list_url_visible": "Private"} + +#+end_SRC + +Dang it I am 
missing something here.... +#+BEGIN_SRC shell :process_r yes :post json-res[:process_r yes](*this*) +jq '.' +#+END_SRC + +#+RESULTS: +#+begin_example + {"id": 41, "mtu": null, "name": "", "descr": "", "ix_id": 41, "rs_asn": 0, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-11T07:21:58Z", "arp_sponge": null, "dot1q_support": false, "ixf_ixp_member_list_url_visible": "Private"} +#+end_example + +** Wrap header +#+BEGIN_SRC sql-mode :results sql :wrap EXPORT json +select data from peeringdb.ixlan limit 1; +#+END_SRC + +#+RESULTS: +#+begin_EXPORT json + {"id": 41, "mtu": null, "name": "", "descr": "", "ix_id": 41, "rs_asn": 0, "status": "ok", "created": "2010-07-29T00:00:00Z", "updated": "2016-03-11T07:21:58Z", "arp_sponge": null, "dot1q_support": false, "ixf_ixp_member_list_url_visible": "Private"} + +#+end_EXPORT + + +** Building with Postgres +#+BEGIN_SRC sql-mode +create schema asntocompany; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +ERROR: schema "asntocompany" already exists +#+end_SRC +#+BEGIN_SRC sql-mode +create table asnproc ( + asn bigint not null primary key +); +\copy asnproc from '/home/ii/peeringdb-simplesync/asns.txt'; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +CREATE TABLE +COPY 415 +#+end_SRC + +#+BEGIN_SRC sql-mode +select (net.data ->> 'name') as "name", + asn + from peeringdb.net + where (net.data ->> 'name') ilike '%google%' + limit 5; +#+END_SRC + +#+BEGIN_SRC sql-mode +select count(*) +from peeringdb.poc p +where (p.data ->> 'email') is not null; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + count +------- + 10756 +(1 row) + +#+end_SRC + +#+BEGIN_SRC sql-mode +select asn.asn, + (net.data ->> 'name') as "name", + (net.data ->> 'website') as "website", + (poc.data ->> 'email') as email + from asnproc asn + left join peeringdb.net net on (net.asn = asn.asn) + left join peeringdb.poc poc on ((poc.data ->> 'name') = (net.data ->> 'name')) + -- where (net.data ->>'website') is not null + -- where (poc.data ->> 
'email') is not null + order by email asc; +#+END_SRC + +#+BEGIN_SRC sql-mode +select + (poc.data ->> 'name') as poc_name +from peeringdb.poc poc +-- left join peeringdb.poc poc on ((net.data ->>'name') = (poc.data ->>'name')) +where (poc.data ->> 'name') ilike '%google%' +or (poc.data ->> 'name') ilike '%amazon%' +or (poc.data ->> 'name') ilike '%microsoft%'; +-- where (net.data ->>'name') ilike '%google%'; +-- select data from peeringdb.net where (data ->> 'asn')::bigint = 21789 limit 1; +#+END_SRC + +#+BEGIN_SRC sql-mode +begin; +-- create table asnproc ( +-- asn bigint not null primary key +-- ); +-- \copy asnproc from '/home/ii/peeringdb-simplesync/asns.txt'; +select count(*) from peeringdb.poc; +select net.id, + asnproc.asn, + (net.data ->> 'name') as "name", + (net.data ->> 'website') as "website" + -- (poc.data ->> 'email') as email + from asnproc + join peeringdb.net net on ((net.data ->> 'asn')::bigint = asnproc.asn) + -- left join peeringdb.poc poc on ((poc.data ->> 'name') = 'chonkers') + -- left join peeringdb.poc poc on ((poc.data ->> 'name') = (net.data ->> 'name')) + -- where (net.data ->>'website') is not null + -- order by email asc + limit 5; +rollback; +#+END_SRC + +** Building with Go + +Scripting the data fetching in Go +#+BEGIN_SRC go :tangle ./asn-db-data-processor.go +package main + +import ( + "fmt" + "log" + "os" + "database/sql" + _ "github.com/lib/pq" +) + +type asnToCompany struct { + ID string + Name string + ASN string + Email string +} + +type asnToCompanySet []asnToCompany + +func GetDBConnection() (*sql.DB, error) { + db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres:password@%v/peeringdb", os.Getenv("SHARINGIO_PAIR_LOAD_BALANCER_IP"))) + db.Ping() + return db, err +} + +func main() { + db, err := GetDBConnection() + if err != nil { + log.Fatalln(err) + } + db.Ping() +} +#+END_SRC + +* Clean up +Remove the table +#+BEGIN_SRC shell +bq rm k8s_artifacts_gcslogs_appspot.asn_company_lookup +#+END_SRC + +Clean up 
+#+BEGIN_SRC shell :results silent +rm -f asn-data.csv +#+END_SRC +* scratch +#+begin_src sql-mode +begin; +create table netfun(ip cidr); +insert into netfun(ip) values('206.223.115.0/24'::cidr); +select ip as ip, +host(network(ip)::inet) as start, +host(broadcast(ip)::inet) as end +from netfun; +rollback; +#+end_src + + +#+RESULTS: +#+begin_SRC example +BEGIN +CREATE TABLE +INSERT 0 1 + ip | start | end +------------------+---------------+----------------- + 206.223.115.0/24 | 206.223.115.0 | 206.223.115.255 +(1 row) + +ROLLBACK +#+end_SRC + +select id, ixlan_id, status, data::jsonb ->> 'name' as name, data::jsonb ->> 'prefix' as prefix from peeringdb.ixpfx limit 5; +#+begin_src sql-mode + do $$ + DECLARE + Counter integer := 1; + BEGIN + create table netfun(ip cidr); + WHILE Counter <= 2275 loop + insert into netfun(ip) values((select prefix from peeringdb.ixlanid_ip where id = Counter limit 1)::cidr); + Counter := Counter + 1; + end loop; + END + $$; +#+end_src + +#+RESULTS: +#+begin_SRC example +peeringdb$# peeringdb$# peeringdb$# peeringdb$# peeringdb$# peeringdb$# peeringdb$# peeringdb$# peeringdb$# peeringdb$# DO +#+end_SRC + + +#+begin_src sql-mode +select ip as ip, +host(network(ip)::inet) as ip_start, +host(broadcast(ip)::inet) as ip_end +into table peeringdb.expanded_ip3 +from netfun; +#+end_src + +#+RESULTS: +#+begin_SRC example +SELECT 2275 +#+end_SRC + +#+begin_src sql-mode + select prefix as ip, host(network(prefix)::inet) as start, host(broadcast(ip)::inet) from peeringdb.ixlanid_ip limit 10; +#+end_src + +#+begin_src sql-mode +\copy (select * from peeringdb.expanded_ip3 where ip_end NOT LIKE '%:%') to '~/peeringdb_expanded_ipv4.csv' csv header; +#+end_src + +#+RESULTS: +#+begin_SRC example +COPY 1058 +#+end_SRC + +#+begin_src shell +bq load --autodetect k8s_artifacts_gcslogs_appspot.peeringdb_expanded_ipv6 /home/ii/peeringdb_expanded_ipv6.csv + +#+end_src + +#+begin_src sql-mode + select count(id) from peeringdb.ixpfx; +#+end_src + +#+RESULTS: 
+#+begin_SRC example + count +------- + 2275 +(1 row) + +#+end_SRC + + +#+begin_src sql-mode + select id, ixlan_id, data::jsonb ->> 'prefix' as prefix INTO TABLE peeringdb.ixlanid_ip from peeringdb.ixpfx; +#+end_src + +#+RESULTS: +#+begin_SRC example +SELECT 2275 +#+end_SRC + +#+begin_src sql-mode + select prefix from peeringdb.ixlanid_ip limit 10; +#+end_src + +#+RESULTS: +#+begin_SRC example + prefix +------------------- + 206.223.115.0/24 + 2001:504:0:2::/64 + 208.115.136.0/23 + 2001:504:0:4::/64 + 206.223.118.0/23 + 2001:504:0:5::/64 + 206.223.123.0/24 + 2001:504:0:3::/64 + 206.223.116.0/23 + 2001:504:0:1::/64 +(10 rows) + +#+end_SRC + + +#+begin_src sql-mode + \l +#+end_src + +#+RESULTS: +#+begin_SRC example + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +-----------+----------+----------+------------+------------+----------------------- + peeringdb | postgres | UTF8 | en_US.utf8 | en_US.utf8 | + postgres | postgres | UTF8 | en_US.utf8 | en_US.utf8 | + template0 | postgres | UTF8 | en_US.utf8 | en_US.utf8 | =c/postgres + + | | | | | postgres=CTc/postgres + template1 | postgres | UTF8 | en_US.utf8 | en_US.utf8 | =c/postgres + + | | | | | postgres=CTc/postgres +(4 rows) + +#+end_SRC diff --git a/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/go.mod b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/go.mod new file mode 100644 index 0000000..314ed8b --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/go.mod @@ -0,0 +1,5 @@ +module asn-to-company-peeringdb-data + +go 1.16 + +require github.com/lib/pq v1.10.1 diff --git a/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/go.sum b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/go.sum new file mode 100644 index 0000000..f356bf9 --- /dev/null +++ 
b/research/k8s-infra-registry-artifacts-migration/asn-to-company-peeringdb-data/go.sum @@ -0,0 +1,2 @@ +github.com/lib/pq v1.10.1 h1:6VXZrLU0jHBYyAqrSPa+MgPfnSvTPuMgK+k0o5kVFWo= +github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= diff --git a/research/k8s-infra-registry-artifacts-migration/becoming-a-mirror/README.org b/research/k8s-infra-registry-artifacts-migration/becoming-a-mirror/README.org new file mode 100644 index 0000000..dbf418a --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/becoming-a-mirror/README.org @@ -0,0 +1,74 @@ +#+TITLE: Becoming a mirror + +* Requirements +- at least 1TB of storage (TODO@BobyMCbobs: determine storage requirements) +- the container registry must be publically pull-able without any credentials +- the container registry implementation must be OCI compliant +- one read-only pull account with known basic htpasswd credentials for private pull +- one read-write pull and push account with private basic htpasswd credientials for image distribution +- tests must pass +- no vendor branding on pages related to the mirroring + +* Adding your mirror +1. set up your mirror (registry, artifacts) +2. create a PR in /kubernetes/k8s.io/registry.k8s.io/infra/meta/asns/ with: + - your ASNs (/asns/) + - your human recognisable vendor name (/name/) + - domain where registry and artifacts will be redirected to (/redirectsTo/) + - at least two email contacts (/contacts/) + - read-only private credentials (/readOnlyPrivateCredentials/) +3. allow time for testing + +#+begin_notes +- currently there are no /contacts/, /readOnlyPrivateCredentials/ fields in the ASN yamls +#+end_notes + +* Validation +Run the test script against your registry and artifact domains. 
+The script will: +- perform a /docker pull/ for a container image +- run a cURL against a file in artifacts + +#+begin_notes +- doing a pull won't work immediately, without images already existing in the container registry +#+end_notes + +* Diagram :noexport: +#+begin_src dot :file registry-k8s-io-becoming-mirror-diagram.svg :results silent +digraph "becoming a mirror" { + label="registry.k8s.io becoming a mirror diagram" + labelloc="t" + graph [compound=true] + + CheckRequirements [label="Check requirements"] + MakeAPR [label="Make a PR"] + ArtifactHostingAcceptance [label="Artifact hosting accepted"] + + subgraph AddingYourMirror { + label="Adding your mirror" + color=blue + SetupRegistry [label="Set up an OCI compliant container registry"] + SetupBucket [label="Set up a bucket with public ingress URI"] + + SetupRegistry -> MakeAPR + SetupBucket -> MakeAPR + } + subgraph Validation { + label="Validation" + color=blue + RunTests [label="Run tests"] + } + + CheckRequirements -> SetupRegistry + CheckRequirements -> SetupBucket + MakeAPR -> RunTests [label="by human or ProwJob"] + RunTests -> ArtifactHostingAcceptance [label="if tests pass"] +} +#+end_src + +#+begin_src shell :results silent :dir . 
+cp registry-k8s-io-becoming-mirror-diagram.svg $HOME/public_html/ +#+end_src + +* Process +[[./registry-k8s-io-becoming-mirror-diagram.svg]] diff --git a/research/k8s-infra-registry-artifacts-migration/becoming-a-mirror/registry-k8s-io-becoming-mirror-diagram.svg b/research/k8s-infra-registry-artifacts-migration/becoming-a-mirror/registry-k8s-io-becoming-mirror-diagram.svg new file mode 100644 index 0000000..d6bd3e8 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/becoming-a-mirror/registry-k8s-io-becoming-mirror-diagram.svg @@ -0,0 +1,88 @@ + + + + + + +becoming a mirror + +registry.k8s.io becoming a mirror diagram + + +CheckRequirements + +Check requirements + + + +SetupRegistry + +Set up an OCI compliant container registry + + + +CheckRequirements->SetupRegistry + + + + + +SetupBucket + +Set up a bucket with public ingress URI + + + +CheckRequirements->SetupBucket + + + + + +MakeAPR + +Make a PR + + + +RunTests + +Run tests + + + +MakeAPR->RunTests + + +by human or ProwJob + + + +ArtifactHostingAcceptance + +Artifact hosting accepted + + + +SetupRegistry->MakeAPR + + + + + +SetupBucket->MakeAPR + + + + + +RunTests->ArtifactHostingAcceptance + + +if tests pass + + + diff --git a/research/k8s-infra-registry-artifacts-migration/building-the-solution/README.org b/research/k8s-infra-registry-artifacts-migration/building-the-solution/README.org new file mode 100644 index 0000000..6f1ffa7 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/building-the-solution/README.org @@ -0,0 +1,24 @@ +#+TITLE: Building registry.k8s.io + +The goal of this doc is to describe the processes of what is involved and needed in the registry redirection. + +* What is required for building registry.k8s.io +** Source IP from a HTTP request +This will be done by using the /X-Real-Ip/ header, provided by the Ingress controller. + +** Look up table for source IP to ASN +PeeringDB contains the IP blocks per each ASN. 
+The source IP can be matched against a source block via either a SQL query of a builtin Golang function. + +** Look up table for ASN to company +The PeeringDB data dump will provide all the information needed to match Company <-> ASN. + +** Connect look up tables with request rewrites +Questions: +- will the data be stored in a SQL database, or a ConfigMap + JSON store? + +** Provide 302 request rewrite +This will be implemented in either completely custom code based on [[https://github.com/kubernetes/k8s.io/tree/main/artifactserver][ArtifactServer]], or an Envoy with a Go WASM filter. + +** Provide logs +Logs will provide information about the request and the redirection. diff --git a/research/k8s-infra-registry-artifacts-migration/container-image-hash-to-tag/container-image-hash-to-tag.org b/research/k8s-infra-registry-artifacts-migration/container-image-hash-to-tag/container-image-hash-to-tag.org new file mode 100644 index 0000000..45c484f --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/container-image-hash-to-tag/container-image-hash-to-tag.org @@ -0,0 +1,196 @@ +#+TITLE: Container Image Hash To Tag + +* Install dependencies +#+BEGIN_SRC shell :results silent +go install github.com/google/go-containerregistry/cmd/gcrane@latest +sudo apt install parallel -y +#+END_SRC + +* Authentication +#+BEGIN_SRC tmate :window gcloud-setup +gcloud auth login +#+END_SRC + +#+BEGIN_SRC tmate :window gcloud-setup +gcloud auth application-default login +#+END_SRC + +#+BEGIN_SRC tmate :window gcloud-setup +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC + +* Investigating the data +#+BEGIN_SRC sql-mode :product bq +SELECT resource, hash_no FROM k8s_artifacts_gcslogs_appspot.resource_and_hash_distinct_list LIMIT 1; +#+END_SRC + +#+RESULTS: +#+begin_SRC example ++----------+------------------------------------------------------------------+ +| resource | hash_no | ++----------+------------------------------------------------------------------+ +| 
pause | 2a060e2e7101d419352bf82c613158587400be743482d9a537ec4a9d1b4eb93c | ++----------+------------------------------------------------------------------+ +#+end_SRC + +* Dumping the data +#+BEGIN_SRC shell :results silent +bq extract --destination_format NEWLINE_DELIMITED_JSON k8s_artifacts_gcslogs_appspot.resource_and_hash_distinct_list gs://ii_bq_scratch_dump/resource_and_hash_distinct_list.json +#+END_SRC + +#+BEGIN_SRC shell :results silent +gsutil cp gs://ii_bq_scratch_dump/resource_and_hash_distinct_list.json /tmp/resource_and_hash_distinct_list.json +#+END_SRC + +* Understanding the data +#+BEGIN_SRC shell +cat /tmp/resource_and_hash_distinct_list.json | jq -s .[0] +#+END_SRC + +#+RESULTS: +#+begin_example +{ + "cs_referer": "https://k8s.gcr.io/v2/pause/blobs/sha256:2a060e2e7101d419352bf82c613158587400be743482d9a537ec4a9d1b4eb93c", + "resource": [ + "pause" + ], + "hash_no": [ + "2a060e2e7101d419352bf82c613158587400be743482d9a537ec4a9d1b4eb93c" + ] +} +#+end_example + +* Getting the tags from the hashes +** Checkout stp's layerMapper branch of k8s.io +#+BEGIN_SRC shell +cd ~/kubernetes/k8s.io +git remote add stp-ip https://github.com/stp-ip/k8s.io +git fetch stp-ip +git checkout layerMapper +#+END_SRC + +#+RESULTS: +#+begin_example +Branch 'layerMapper' set up to track remote branch 'layerMapper' from 'stp-ip'. 
+#+end_example + +** Running the first hash number +#+BEGIN_SRC shell +HASH=$(cat /tmp/resource_and_hash_distinct_list.json | jq -s -r .[0].hash_no[0]) +cd ~/kubernetes/k8s.io/metrics/access-logs/poc-layer-mapper +bash ./matcher.sh $HASH +#+END_SRC + +#+RESULTS: +#+begin_example +==== +Layer:2a060e2e7101d419352bf82c613158587400be743482d9a537ec4a9d1b4eb93c +Repos + kubernetes/pause-arm64 + pause-arm64 + +Tags + kubernetes/pause-arm64:3.2/amd64 + pause-arm64:3.2/amd64 + +#+end_example + +** Generating JSON v1 +#+BEGIN_SRC shell :tangle /tmp/sort_resource_and_hash_distinct_list.sh +cd ~/kubernetes/k8s.io/metrics/access-logs/poc-layer-mapper +BASE="us.gcr.io/k8s-artifacts-prod/" +( + for ARG in $(cat /tmp/resource_and_hash_distinct_list.json | jq -s -r '.[].hash_no[0]'); do + declare -a MATCH_TAG=() + MATCH_TAG+=$(find tags -type f | cut -d "/" -f2- <<< $(parallel -k -j1000% -n 1000 -m grep -H -l -m1 $ARG {})) + IFS=$'\n' MATCH_TAG_SORTED=($(sort <<<"${MATCH_TAG[*]}")) + unset IFS + declare -a TAGS=() + for V in "${MATCH_TAG_SORTED[@]}" + do + V=${V//\_/\/} + V=${V#"$BASE"} + TAGS+="$V " + done + jq -n '{hash: $ARGS.named["hash"], tags: $ARGS.positional}' --arg hash "$ARG" --args ${TAGS[*]} +done +) > /tmp/sorted_resource_and_hash_distinct_list.json +#+END_SRC + +Running the script +#+BEGIN_SRC tmate :window sort-resource-distinct +bash /tmp/sort_resource_and_hash_distinct_list.sh +#+END_SRC + +Too inefficient and slow. 
+ +** Generating JSON v2 +#+BEGIN_SRC shell :tangle /tmp/sort_resource_and_hash_distinct_list_v2.sh +cd ~/kubernetes/k8s.io/metrics/access-logs/poc-layer-mapper +BASE="us.gcr.io/k8s-artifacts-prod/" +HASHES=$(cat /tmp/resource_and_hash_distinct_list.json | jq -s -r '.[].hash_no[0]') +function resource_to_tag { + ARG=$1 + declare -a MATCH_TAG=() + MATCH_TAG+=$(find tags -type f | cut -d "/" -f2- <<< $(parallel -k -j1000% -n 1000 -m grep -H -l -m1 $ARG {})) + IFS=$'\n' MATCH_TAG_SORTED=($(sort <<<"${MATCH_TAG[*]}")) + unset IFS + declare -a TAGS=() + for V in "${MATCH_TAG_SORTED[@]}" + do + V=${V//\_/\/} + V=${V#"$BASE"} + TAGS+="$V " + done + jq -n '{hash: $ARGS.named["hash"], tags: $ARGS.positional}' --arg hash "$ARG" --args ${TAGS[*]} +} +export -f resource_to_tag + +for HASH in $HASHES; do + echo $HASH | parallel -k -j1000% -n 1000 -m resource_to_tag $HASH {} +done +#+END_SRC + +Running the script +#+BEGIN_SRC tmate :window sort-resource-distinct-v2 +bash /tmp/sort_resource_and_hash_distinct_list_v2.sh +#+END_SRC + +This doesn't work. 
+ +** Loading the tags into BigQuery + +#+BEGIN_SRC tmate :window load-into-bq +cd ~/kubernetes/k8s.io/metrics/access-logs/poc-layer-mapper/tags +for FILENAME in *; do + FILENAME_CLEAN=$(echo $FILENAME | sed 's/_/\//g') + IMAGE=$(echo $FILENAME_CLEAN | cut -d ':' -f1) + TAG=$(echo $FILENAME_CLEAN | cut -d ':' -f2) + echo "image,tag,hash" > /tmp/image-tag-hash.csv + cat $FILENAME | jq -r '(.config.digest |= split(":")[1]) | [$ARGS.named["image"], $ARGS.named["tag"], .config.digest] | @csv' --arg image "$IMAGE" --arg tag "$TAG" | tee -a /tmp/image-tag-hash.csv +done +#+END_SRC + +Clean up +#+BEGIN_SRC shell :results silent +rm /tmp/image-tag-hash.csv +#+END_SRC + +Upload to bucket +#+BEGIN_SRC shell :results silent +gsutil cp /tmp/image-tag-hash.csv gs://ii_bq_scratch_dump/image-tag-hash.csv +#+END_SRC + +Load into BigQuery +#+BEGIN_SRC shell :results silent +bq load --autodetect --source_format=CSV k8s_artifacts_gcslogs_appspot.image_tag_hash gs://ii_bq_scratch_dump/image-tag-hash.csv image:string,tag:string,hash:string +#+END_SRC + +** Parallel version +#+BEGIN_SRC tmate :window load-into-bq2 +cd ~/kubernetes/k8s.io/metrics/access-logs/poc-layer-mapper/tags +FILENAME_CLEAN=$(echo $FILENAME | sed 's/_/\//g') +IMAGE=$(echo $FILENAME_CLEAN | cut -d ':' -f1) +TAG=$(echo $FILENAME_CLEAN | cut -d ':' -f2) +find . 
-type f -print | tr -d './' | head -n 5 | parallel jq -r '(.config.digest |= split(":")[1]) | ($ARGS.named["filename"] | split(":")[0]) as $image | ($ARGS.named["filename"] | split(":")[1]) as $tag | [$image, $tag, .config.digest] | @csv' --arg filename {} +#+END_SRC diff --git a/research/k8s-infra-registry-artifacts-migration/diagram.dot b/research/k8s-infra-registry-artifacts-migration/diagram.dot new file mode 100644 index 0000000..c7370f1 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/diagram.dot @@ -0,0 +1,65 @@ +digraph G { + graph [bb="0,0,889.5,371", + compound=true, + label="registry.k8s.io diagram", + labelloc=t, + lheight=0.21, + lp="444.75,359.5", + lwidth=2.31 + ]; + node [label="\N"]; + IngressTraffic [height=0.5, + label="Ingress traffic", + pos="227.5,330", + width=2.1123]; + Implementation [color=green, + height=0.52778, + label="Implementation (registry.k8s.io)\n(an Envoy go-WASM or ArtifactServer based implementation)", + pos="227.5,242", + shape=rectangle, + width=6.3194]; + IngressTraffic -> Implementation [pos="e,227.5,261.27 227.5,311.6 227.5,300.06 227.5,284.65 227.5,271.36"]; + ExternalDataSources [height=0.52778, + label="External Data Sources\n(such as https://bgp.he.net)", + pos="134.5,19", + shape=rectangle, + width=2.9444]; + Datastore [height=0.94444, + label="Datastore\n(a SQL database\nor\nJSON files from a ConfigMap)", + pos="134.5,123", + shape=rectangle, + width=3.1528]; + Datastore -> ExternalDataSources [label="periodic fetch", + lp="184,63.5", + pos="e,134.5,38.106 134.5,88.884 134.5,75.836 134.5,61.001 134.5,48.448"]; + Implementation -> Datastore [label="determine closest\ncloud provider from IP", + lp="216.5,190", + pos="e,126.83,157.34 155.01,222.83 147.38,218.18 140.57,212.34 135.5,205 128.05,194.23 125.89,180.58 126.15,167.57"]; + CloudProvider [height=0.5, + label="CloudProvider[n] container registry", + pos="566.5,123", + shape=rectangle, + style=dotted, + width=3.7778]; + Implementation -> 
CloudProvider [label="302 redirect\npoint cloud provider at their hosted container registry", + lp="511,190", + pos="e,436.9,141 271.65,222.83 280.8,217.87 289.94,211.92 297.5,205 309.15,194.35 303.51,183.96 316.5,175 335.48,161.91 380.36,151.03 \ +426.94,142.73"]; + K8sDotGCRDotIO [height=0.5, + label="k8s.gcr.io", + pos="731.5,330", + shape=rectangle, + width=1.1806]; + CIP [height=0.5, + label="Container Image Promoter", + pos="731.5,242", + shape=rectangle, + width=2.8889]; + K8sDotGCRDotIO -> CIP [dir=both, + label="Promote an image 1/2", + lp="810.5,286.5", + pos="s,731.5,311.6 e,731.5,260.08 731.5,301.34 731.5,291.38 731.5,280.15 731.5,270.22"]; + CIP -> CloudProvider [label="Promote an image 2/2", + lp="805.5,190", + pos="e,653.55,141.12 729.96,223.96 727.76,209.2 722.43,188.2 709.5,175 696.66,161.89 680.23,152.14 663.11,144.89"]; +} diff --git a/research/k8s-infra-registry-artifacts-migration/diagram.org b/research/k8s-infra-registry-artifacts-migration/diagram.org new file mode 100644 index 0000000..c757f48 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/diagram.org @@ -0,0 +1,50 @@ +#+TITLE: Diagram of k8s infra + +#+begin_src dot :file diagram.png +digraph G { + label="registry.k8s.io" + labelloc="t" + graph [compound=true] + node [shape=record] + + RegistryK8sIo [label="registry.k8s.io"] + + Request -> RegistryK8sIo [color="orange"] + RegistryK8sIo -> "k8s.gcr.io" [color="orange"] + "GCP to GCP request" -> RegistryK8sIo [color="blue"] + RegistryK8sIo -> "k8s.gcr.io" [color="blue"] + "CP to CP request" -> RegistryK8sIo [color="green"] + RegistryK8sIo -> "CP's registry" [color="green"] + + subgraph cluster_0 { + node [style=filled]; + label = "Google Cloud Platform"; + "k8s.gcr.io"; "GCP to GCP request" [label="Request"] + color=blue; + } + + subgraph cluster_1 { + node [style=dotted]; + label = "Credit Provider's cloud service (such as AWS or Azure)"; + "CP's registry"; "CP to CP request" [label="Request"] + color=green; + } + + 
subgraph cluster_2 { + node [style=filled]; + label = "Anywhere" + Request + color=orange; + } +} +#+end_src + +#+RESULTS: +#+begin_src dot +[[file:diagram.png]] +#+end_src + +#+begin_src shell :results silent +cp ./diagram.png ~/public_html/diagram.png +#+end_src + diff --git a/research/k8s-infra-registry-artifacts-migration/diagram.png b/research/k8s-infra-registry-artifacts-migration/diagram.png new file mode 100644 index 0000000..99f6d12 Binary files /dev/null and b/research/k8s-infra-registry-artifacts-migration/diagram.png differ diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy-config.yaml b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy-config.yaml new file mode 100644 index 0000000..bb9c9e1 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy-config.yaml @@ -0,0 +1,31 @@ +# envoy-config.yaml + +node: + id: web_service + cluster: web_service + +dynamic_resources: + lds_config: + path: /var/lib/envoy/lds.yaml + +static_resources: + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: k8s.io + port_value: 443 +admin: + access_log_path: /dev/null + address: + socket_address: + address: 0.0.0.0 + port_value: 9003 diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy-lds.yaml b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy-lds.yaml new file mode 100644 index 0000000..eb79684 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy-lds.yaml @@ -0,0 +1,58 @@ +# envoy-lds.yaml + +resources: +- "@type": type.googleapis.com/envoy.config.listener.v3.Listener + name: 
listener_0 + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + name: envoy.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + local reg1 = "k8s.gcr.io" + local reg2 = "registry-1.docker.io" + local reg2WithIP = "192.168.0.18" + function envoy_on_request(request_handle) + local reg = reg1 + remoteAddr = request_handle:headers():get("x-real-ip") + if remoteAddr == reg2WithIP then + request_handle:logInfo("remoteAddr: "..reg2WithIP) + reg = reg2 + end + request_handle:logInfo("REG: "..reg) + request_handle:logInfo("REMOTEADDR: "..remoteAddr) + request_handle:logInfo("Hello") + request_handle:logInfo("My friend") + if request_handle:headers():get(":method") == "GET" then + request_handle:respond( + { + [":status"] = "302", + ["location"] = "https://"..reg..request_handle:headers():get(":path"), + ["Content-Type"] = "text/html; charset=utf-8", + [":authority"] = "web_service" + }, + ''.."302"..".\n") + end + end + - name: envoy.filters.http.router + typed_config: {} diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy.yaml b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy.yaml new file mode 100644 index 0000000..89e207c --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/envoy.yaml @@ -0,0 +1,111 @@ +# Deploying Envoy + +apiVersion: apps/v1 +kind: Deployment +metadata: + # annotations: + # lastcfg: | + # ${ENVOY_LAST_CFG} + 
labels: + app: registry-k8s-io-envoy + name: registry-k8s-io-envoy +spec: + replicas: 1 + selector: + matchLabels: + app: registry-k8s-io-envoy + template: + metadata: + # annotations: + # lastcfg: | + # ${ENVOY_LAST_CFG} + labels: + app: registry-k8s-io-envoy + spec: + containers: + - name: envoy + command: + - /usr/local/bin/envoy + - -c + - /etc/envoy.yaml + - -l + - debug + image: envoyproxy/envoy:v1.18.2 + volumeMounts: + - name: envoy-config + mountPath: /etc/envoy.yaml + subPath: envoy.yaml + - name: envoy-config-lds + mountPath: /var/lib/envoy/ + ports: + - name: http + containerPort: 10000 + volumes: + - name: envoy-config + configMap: + name: envoy-config + - name: envoy-config-lds + configMap: + name: envoy-config-lds +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: registry-k8s-io-envoy + name: registry-k8s-io-envoy +spec: + ports: + - name: registry-k8s-io + port: 10000 + protocol: TCP + targetPort: 10000 + - name: registry-k8s-io-admin + port: 9003 + protocol: TCP + targetPort: 9003 + selector: + app: registry-k8s-io-envoy + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: registry-k8s-io-envoy +spec: + rules: + - host: registry-k8s-io.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: registry-k8s-io-envoy + port: + number: 10000 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - registry-k8s-io.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: registry-k8s-io-envoy-admin +spec: + rules: + - host: registry-k8s-io-admin.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: registry-k8s-io-envoy + port: + number: 9003 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - registry-k8s-io-admin.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod diff --git 
a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/registry-k8s-io-envoy-in-k8s.org b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/registry-k8s-io-envoy-in-k8s.org new file mode 100644 index 0000000..1c7091c --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting-hot-reload/registry-k8s-io-envoy-in-k8s.org @@ -0,0 +1,297 @@ +#+TITLE: registry.k8.io Envoy In K8s + +Set up Envoy to rewrite the requests, 302'ing the hostname and path to a registry that's chosen by an external service. + +* Deploying Envoy +** Envoy configuration +*** envoy-config.yaml +#+BEGIN_SRC yaml :tangle ./envoy-config.yaml +node: + id: web_service + cluster: web_service + +dynamic_resources: + lds_config: + path: /var/lib/envoy/lds.yaml + +static_resources: + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: k8s.io + port_value: 443 +admin: + access_log_path: /dev/null + address: + socket_address: + address: 0.0.0.0 + port_value: 9003 +#+END_SRC + +*** envoy-lds.yaml +#+BEGIN_SRC yaml :tangle ./envoy-lds.yaml +resources: +- "@type": type.googleapis.com/envoy.config.listener.v3.Listener + name: listener_0 + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + name: envoy.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + 
inline_code: | + local reg1 = "k8s.gcr.io" + local reg2 = "registry-1.docker.io" + local reg2WithIP = "192.168.0.18" + function envoy_on_request(request_handle) + local reg = reg1 + remoteAddr = request_handle:headers():get("x-real-ip") + if remoteAddr == reg2WithIP then + request_handle:logInfo("remoteAddr: "..reg2WithIP) + reg = reg2 + end + request_handle:logInfo("REG: "..reg) + request_handle:logInfo("REMOTEADDR: "..remoteAddr) + request_handle:logInfo("Hello") + request_handle:logInfo("My friend") + if request_handle:headers():get(":method") == "GET" then + request_handle:respond( + { + [":status"] = "302", + ["location"] = "https://"..reg..request_handle:headers():get(":path"), + ["Content-Type"] = "text/html; charset=utf-8", + [":authority"] = "web_service" + }, + ''.."302"..".\n") + end + end + - name: envoy.filters.http.router + typed_config: {} +#+END_SRC + +** Apply configuration +#+BEGIN_SRC shell :results silent +kubectl create configmap envoy-config --from-file=envoy\.yaml=envoy-config.yaml --dry-run=client -o yaml | kubectl apply -f - +kubectl create configmap envoy-config-lds --from-file=lds\.yaml=envoy-lds.yaml --dry-run=client -o yaml | kubectl apply -f - +#+END_SRC + +** Deploying Envoy +#+BEGIN_SRC yaml :tangle ./envoy.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + # annotations: + # lastcfg: | + # ${ENVOY_LAST_CFG} + labels: + app: registry-k8s-io-envoy + name: registry-k8s-io-envoy +spec: + replicas: 1 + selector: + matchLabels: + app: registry-k8s-io-envoy + template: + metadata: + # annotations: + # lastcfg: | + # ${ENVOY_LAST_CFG} + labels: + app: registry-k8s-io-envoy + spec: + containers: + - name: envoy + command: + - /usr/local/bin/envoy + - -c + - /etc/envoy.yaml + - -l + - debug + image: envoyproxy/envoy:v1.18.2 + volumeMounts: + - name: envoy-config + mountPath: /etc/envoy.yaml + subPath: envoy.yaml + - name: envoy-config-lds + mountPath: /var/lib/envoy/ + ports: + - name: http + containerPort: 10000 + volumes: + - name: 
envoy-config + configMap: + name: envoy-config + - name: envoy-config-lds + configMap: + name: envoy-config-lds +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: registry-k8s-io-envoy + name: registry-k8s-io-envoy +spec: + ports: + - name: registry-k8s-io + port: 10000 + protocol: TCP + targetPort: 10000 + - name: registry-k8s-io-admin + port: 9003 + protocol: TCP + targetPort: 9003 + selector: + app: registry-k8s-io-envoy + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: registry-k8s-io-envoy +spec: + rules: + - host: registry-k8s-io.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: registry-k8s-io-envoy + port: + number: 10000 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - registry-k8s-io.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: registry-k8s-io-envoy-admin +spec: + rules: + - host: registry-k8s-io-admin.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: registry-k8s-io-envoy + port: + number: 9003 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - registry-k8s-io-admin.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod +#+END_SRC + +#+BEGIN_SRC shell :results silent +envsubst < envoy.yaml | kubectl apply -f - +#+END_SRC + +#+BEGIN_SRC shell :results silent +kubectl delete -f envoy.yaml +#+END_SRC + +** A classic dose of debug +#+BEGIN_SRC shell +curl https://registry-k8s-io-admin.$SHARINGIO_PAIR_BASE_DNS_NAME/config_dump | jq -r '.configs[2].dynamic_listeners[0]' +#+END_SRC + +#+RESULTS: +#+begin_example +{ + "name": "listener_0", + "active_state": { + "listener": { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "listener_0", + "address": { + "socket_address": { + "address": "0.0.0.0", + "port_value": 10000 + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": 
"envoy.http_connection_manager", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "stat_prefix": "ingress_http", + "route_config": { + "name": "local_route", + "virtual_hosts": [ + { + "name": "local_service", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "web_service" + } + } + ] + } + ] + }, + "http_filters": [ + { + "name": "envoy.filters.http.lua", + "typed_config": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua", + "inline_code": "local reg1 = \"k8s.gcr.io\"\nlocal reg2 = \"registry-1.docker.io\"\nlocal reg2WithIP = \"192.168.0.18\"\nfunction envoy_on_request(request_handle)\n local reg = reg1\n remoteAddr = request_handle:headers():get(\"x-real-ip\")\n if remoteAddr == reg2WithIP then\n request_handle:logInfo(\"remoteAddr: \"..reg2WithIP)\n reg = reg2\n end\n request_handle:logInfo(\"REG: \"..reg)\n request_handle:logInfo(\"REMOTEADDR: \"..remoteAddr)\n request_handle:logInfo(\"Hello\")\n -- request_handle:logInfo(\"Hello\")\n if request_handle:headers():get(\":method\") == \"GET\" then\n request_handle:respond(\n {\n [\":status\"] = \"302\",\n [\"location\"] = \"https://\"..reg..request_handle:headers():get(\":path\"),\n [\"Content-Type\"] = \"text/html; charset=utf-8\",\n [\":authority\"] = \"web_service\"\n },\n ''..\"302\"..\".\\n\")\n end\nend\n" + } + }, + { + "name": "envoy.filters.http.router", + "typed_config": {} + } + ] + } + } + ] + } + ] + }, + "last_updated": "2021-04-30T02:09:15.237Z" + } +} +#+end_example diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/Dockerfile b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/Dockerfile new file mode 100644 index 0000000..2e90891 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/Dockerfile @@ -0,0 +1,31 @@ +# Envoy +# 
Prepare a container image for the reg-host-authority + +FROM golang:1.17.0-alpine3.14 AS build +WORKDIR /app +COPY main.go /app/ +COPY go.* *.go /app/ +ARG GOARCH="" +RUN CGO_ENABLED=0 GOOS=linux GOARCH="$GOARCH" go build \ + -a \ + -installsuffix cgo \ + -ldflags "-extldflags '-static' -s -w" \ + -o bin/reg-host-authority \ + main.go + +FROM alpine:3.14 AS extras +RUN apk add --no-cache tzdata ca-certificates +RUN adduser -D user + +FROM scratch AS final +WORKDIR /app +ENV PATH=/app \ + APP_DIST_FOLDER=./dist +COPY --from=build /app/bin/reg-host-authority /app/bin/reg-host-authority +COPY --from=extras /etc/passwd /etc/passwd +COPY --from=extras /etc/group /etc/group +COPY --from=extras /usr/share/zoneinfo /usr/share/zoneinfo +COPY --from=extras /etc/ssl /etc/ssl +EXPOSE 8080 +USER user +ENTRYPOINT ["/app/bin/reg-host-authority"] diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/envoy-config.yaml b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/envoy-config.yaml new file mode 100644 index 0000000..31d2bd9 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/envoy-config.yaml @@ -0,0 +1,88 @@ +# Envoy configuration + + +static_resources: + listeners: + - name: main + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + function envoy_on_request(request_handle) + 
remoteAddr = request_handle:headers():get("x-real-ip") + local headers, body = request_handle:httpCall( + "humacs", + { + [":method"] = "GET", + [":path"] = "/", + [":authority"] = "humacs", + ["X-Real-Ip"] = remoteAddr + }, + remoteAddr, + 5000 + ) + reg = body + if request_handle:headers():get(":method") == "GET" then + request_handle:respond( + { + [":status"] = "302", + ["location"] = "https://"..reg..request_handle:headers():get(":path"), + ["Content-Type"] = "text/html; charset=utf-8", + [":authority"] = "web_service" + }, + ''.."302"..".\n") + end + end + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: k8s.io + port_value: 443 + - name: humacs + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: humacs + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: humacs-reg-host-author-8080 + port_value: 8080 diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/envoy.yaml b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/envoy.yaml new file mode 100644 index 0000000..ed69988 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/envoy.yaml @@ -0,0 +1,78 @@ +# Deploying Envoy + +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: registry-k8s-io-envoy + name: registry-k8s-io-envoy +spec: + replicas: 3 + selector: + matchLabels: + app: registry-k8s-io-envoy + template: + metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: registry-k8s-io-envoy + spec: + containers: + - name: envoy + args: + - -c + - /etc/envoy/envoy.yaml + image: getenvoy/envoy:stable + 
volumeMounts: + - name: config + mountPath: /etc/envoy/envoy.yaml + subPath: envoy.yaml + ports: + - name: http + containerPort: 10000 + volumes: + - name: config + configMap: + name: envoy-config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: registry-k8s-io-envoy + name: registry-k8s-io-envoy +spec: + ports: + - name: registry-k8s-io + port: 10000 + protocol: TCP + targetPort: 10000 + selector: + app: registry-k8s-io-envoy + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: registry-k8s-io-envoy +spec: + rules: + - host: registry-k8s-io.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: registry-k8s-io-envoy + port: + number: 10000 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - registry-k8s-io.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/Dockerfile b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/Dockerfile new file mode 100644 index 0000000..2e90891 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/Dockerfile @@ -0,0 +1,31 @@ +# Envoy +# Prepare a container image for the reg-host-authority + +FROM golang:1.17.0-alpine3.14 AS build +WORKDIR /app +COPY main.go /app/ +COPY go.* *.go /app/ +ARG GOARCH="" +RUN CGO_ENABLED=0 GOOS=linux GOARCH="$GOARCH" go build \ + -a \ + -installsuffix cgo \ + -ldflags "-extldflags '-static' -s -w" \ + -o bin/reg-host-authority \ + main.go + +FROM alpine:3.14 AS extras +RUN apk add --no-cache tzdata ca-certificates +RUN adduser -D user + +FROM scratch AS final +WORKDIR /app +ENV PATH=/app \ + APP_DIST_FOLDER=./dist +COPY --from=build /app/bin/reg-host-authority /app/bin/reg-host-authority +COPY --from=extras /etc/passwd /etc/passwd +COPY --from=extras /etc/group /etc/group +COPY --from=extras 
/usr/share/zoneinfo /usr/share/zoneinfo +COPY --from=extras /etc/ssl /etc/ssl +EXPOSE 8080 +USER user +ENTRYPOINT ["/app/bin/reg-host-authority"] diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/go.mod b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/go.mod new file mode 100644 index 0000000..0843383 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/go.mod @@ -0,0 +1,5 @@ +module reg-host-authority + +go 1.16 + +require github.com/gorilla/mux v1.8.0 diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/go.sum b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/go.sum new file mode 100644 index 0000000..5350288 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/go.sum @@ -0,0 +1,2 @@ +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/main.go b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/main.go new file mode 100644 index 0000000..a8c74b3 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/main.go @@ -0,0 +1,56 @@ +package main + +import ( + "log" + "net/http" + "time" + + "github.com/gorilla/mux" +) + +// logging ... 
+// basic request logging middleware +func logging(next http.Handler) http.Handler { + // log all requests + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + log.Printf("%v %v %v %v %v/%v", r.Method, r.URL, r.Proto, r.Response, r.RemoteAddr, r.Header.Get("X-Real-Ip")) + next.ServeHTTP(w, r) + }) +} + +func decideHost(sourceIP string) string { + // get asns (a large list) + // use a BGP library to map out the route + // use sourceIP to find the closest + if sourceIP == "192.168.0.17" { + return "registry-1.docker.io" + } + return "k8s.gcr.io" +} + +// getRoot ... +// get root of API +func getRoot(w http.ResponseWriter, r *http.Request) { + host := decideHost(r.Header.Get("X-Real-Ip")) + log.Println(host) + w.WriteHeader(200) + w.Write([]byte(host)) +} + +func main() { + // bring up the API + port := ":8080" + router := mux.NewRouter() + + router.HandleFunc("/", getRoot) + router.Use(logging) + + srv := &http.Server{ + Handler: router, + Addr: port, + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + log.Println("Listening on", port) + log.Fatal(srv.ListenAndServe()) +} diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/registry-k8s-io-envoy-in-k8s.org b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/registry-k8s-io-envoy-in-k8s.org new file mode 100644 index 0000000..0ac9e6b --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/registry-k8s-io-envoy-in-k8s.org @@ -0,0 +1,266 @@ +#+TITLE: registry.k8.io Envoy In K8s + +Set up Envoy to rewrite the requests, 302'ing the hostname and path to a registry that's chosen by an external service. + +* Setting up an authority for the host +A simple webserver to dictate the host that Envoy will rewrite requests to, based on the source IP. 
+ +#+BEGIN_SRC shell :results silent +mkdir reg-host-authority +#+END_SRC + +#+BEGIN_SRC go :tangle ./reg-host-authority/main.go +package main + +import ( + "log" + "net/http" + "time" + "os" + + "github.com/gorilla/mux" +) + +// logging ... +// basic request logging middleware +func logging(next http.Handler) http.Handler { + // log all requests + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + log.Printf("%v %v %v %v %v/%v", r.Method, r.URL, r.Proto, r.Response, r.RemoteAddr, r.Header.Get("X-Real-Ip")) + next.ServeHTTP(w, r) + }) +} + +func decideHost(sourceIP string) string { + // get asns (a large list) + // use a BGP library to map out the route + // use sourceIP to find the closest + if sourceIP == os.Getenv("LOCAL_IP") { + return "registry-1.docker.io" + } + return "k8s.gcr.io" +} + +// getRoot ... +// get root of API +func getRoot(w http.ResponseWriter, r *http.Request) { + host := decideHost(r.Header.Get("X-Real-Ip")) + log.Println(host) + w.WriteHeader(200) + w.Write([]byte(host)) +} + +func main() { + // bring up the API + port := ":8080" + router := mux.NewRouter() + + router.HandleFunc("/", getRoot) + router.Use(logging) + + srv := &http.Server{ + Handler: router, + Addr: port, + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + log.Println("Listening on", port) + log.Fatal(srv.ListenAndServe()) +} +#+END_SRC + +#+BEGIN_SRC shell :dir ./reg-host-authority :results silent +go mod init reg-host-authority +#+END_SRC + +#+BEGIN_SRC shell :dir ./reg-host-authority :results silent +go get ./... +#+END_SRC + +#+BEGIN_SRC tmate :dir ./reg-host-authority :results silent :window reg-host-authority +go run . 
+#+END_SRC + +* Deploying Envoy +** Envoy configuration + +#+BEGIN_SRC yaml :tangle ./envoy-config.yaml +static_resources: + listeners: + - name: main + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + function envoy_on_request(request_handle) + remoteAddr = request_handle:headers():get("x-real-ip") + local headers, body = request_handle:httpCall( + "humacs", + { + [":method"] = "GET", + [":path"] = "/", + [":authority"] = "humacs", + ["X-Real-Ip"] = remoteAddr + }, + remoteAddr, + 5000 + ) + reg = body + if request_handle:headers():get(":method") == "GET" then + request_handle:respond( + { + [":status"] = "302", + ["location"] = "https://"..reg..request_handle:headers():get(":path"), + ["Content-Type"] = "text/html; charset=utf-8", + [":authority"] = "web_service" + }, + ''.."302"..".\n") + end + end + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: k8s.io + port_value: 443 + - name: humacs + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: humacs + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: humacs-reg-host-author-8080 + port_value: 8080 
+#+END_SRC + +#+BEGIN_SRC shell :results silent +kubectl create configmap envoy-config --from-file=envoy\.yaml=envoy-config.yaml --dry-run=client -o yaml | kubectl apply -f - +#+END_SRC + +** Deploying Envoy +#+BEGIN_SRC yaml :tangle ./envoy.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: registry-k8s-io-envoy + name: registry-k8s-io-envoy +spec: + replicas: 3 + selector: + matchLabels: + app: registry-k8s-io-envoy + template: + metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: registry-k8s-io-envoy + spec: + containers: + - name: envoy + args: + - -c + - /etc/envoy/envoy.yaml + image: envoyproxy/envoy-distroless:v1.20.0 + volumeMounts: + - name: config + mountPath: /etc/envoy/envoy.yaml + subPath: envoy.yaml + ports: + - name: http + containerPort: 10000 + volumes: + - name: config + configMap: + name: envoy-config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: registry-k8s-io-envoy + name: registry-k8s-io-envoy +spec: + ports: + - name: registry-k8s-io + port: 10000 + protocol: TCP + targetPort: 10000 + selector: + app: registry-k8s-io-envoy + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: registry-k8s-io-envoy +spec: + rules: + - host: registry-k8s-io.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: registry-k8s-io-envoy + port: + number: 10000 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - registry-k8s-io.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod +#+END_SRC + +#+BEGIN_SRC shell :results silent +export ENVOY_LAST_CFG=$(cat envoy-config.yaml | sha256sum) +envsubst < envoy.yaml | kubectl apply -f - +#+END_SRC + +#+BEGIN_SRC shell :results silent +kubectl delete -f envoy.yaml +#+END_SRC diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/Dockerfile 
b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/Dockerfile new file mode 100644 index 0000000..b0c2388 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/Dockerfile @@ -0,0 +1,10 @@ +# Deploying in Kubernetes +# Create container image + +FROM tinygo/tinygo:0.20.0 AS build +WORKDIR /app +COPY go.* *.go /app/ +RUN tinygo build -o wasm.wasm -scheduler=none -target=wasi /app/main.go + +FROM envoyproxy/envoy-distroless:v1.20.0 +COPY --from=build /app/wasm.wasm /etc/envoy/wasm.wasm diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/README.org b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/README.org new file mode 100644 index 0000000..f5f7fb1 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/README.org @@ -0,0 +1,331 @@ +#+TITLE: Envoy WASM dynamic host rewriting + +#+begin_quote +Rewrite the location header with Envoy, written in Golang and compiled as WASM +#+end_quote + +* The code +#+begin_src go :tangle main.go +package main + +import ( + "fmt" + "os" + + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm" + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types" +) + +const ( + realIPKey = "x-real-ip" + matchIPKey = "MATCH_IP" + locationKey = "location" + authorityKey = ":authority" + statusKey = ":status" + pathKey = ":path" + statusCode = 302 + defaultHost = "k8s.gcr.io" + rewriteHost = "registry-1.docker.io" +) + +var ( + authority = os.Getenv("AUTHORITY") + matchIP = os.Getenv(matchIPKey) +) + +func main() { + proxywasm.SetVMContext(&vmContext{}) +} + +type vmContext struct { + // Embed the default VM context here, + // so that we don't need to reimplement all the methods. + types.DefaultVMContext +} + +// Override types.DefaultVMContext. 
+func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext { + return &pluginContext{} +} + +type pluginContext struct { + // Embed the default plugin context here, + // so that we don't need to reimplement all the methods. + types.DefaultPluginContext +} + +// Override types.DefaultPluginContext. +func (*pluginContext) NewHttpContext(contextID uint32) types.HttpContext { + return &httpRouting{} +} + +type httpRouting struct { + // Embed the default http context here, + // so that we don't need to reimplement all the methods. + types.DefaultHttpContext + bodySize int + endOfStream bool +} + +func (ctx *pluginContext) OnPluginStart(pluginConfigurationSize int) types.OnPluginStartStatus { + return types.OnPluginStartStatusOK +} + +// Override types.DefaultHttpContext. +func (ctx *httpRouting) OnHttpRequestHeaders(numHeaders int, endOfStream bool) types.Action { + host := defaultHost + remoteAddr, err := proxywasm.GetHttpRequestHeader(realIPKey) + if err != nil { + proxywasm.LogCritical(fmt.Sprintf("Error: getting request header: '%v'", realIPKey)) + } + if matchIP == remoteAddr { + host = rewriteHost + } + + path, _ := proxywasm.GetHttpRequestHeader(pathKey) + body := fmt.Sprintf(`%v.`, host, path, statusCode) + if err := proxywasm.SendHttpResponse(statusCode, [][2]string{ + {authorityKey, authority}, + {locationKey, fmt.Sprintf("https://%v%v", host, path)}, + {statusKey, fmt.Sprintf("%s", statusCode)}, + {pathKey, path}, + }, []byte(body)); err != nil { + proxywasm.LogErrorf("Error: sending http response: %v", err) + proxywasm.ResumeHttpRequest() + } + return types.ActionPause +} + +func (ctx *pluginContext) OnTick() {} +#+end_src + +Compile with tinygo +#+begin_src tmate :window build-wasm :prologue "docker run --rm --user $(id -u):$(id -g) --tmpfs /.cache --tmpfs /go -v $(pwd):$(pwd) --workdir=$(pwd) tinygo/tinygo:0.20.0 \\" +tinygo build -o wasm.wasm -scheduler=none -target=wasi . 
+#+end_src + +* Configure +#+begin_src yaml :tangle ./envoy-config.yaml +static_resources: + listeners: + - name: main + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.wasm + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + config: + name: "wasm" + root_id: "wasm_root" + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + { + } + vm_config: + runtime: "envoy.wasm.runtime.v8" + vm_id: "wasm_vm" + code: + local: + filename: "/etc/envoy/wasm.wasm" + environment_variables: + host_env_keys: + - MATCH_IP + key_values: + AUTHORITY: web_service + - name: envoy.filters.http.router + typed_config: {} + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: k8s.io + port_value: 443 +#+end_src + +* Testing in Docker +#+begin_src tmate :window envoy +docker \ + run \ + -it \ + --rm \ + -p 10000:10000 \ + -v $PWD/envoy-config.yaml:/etc/envoy/envoy.yaml \ + -v $PWD/wasm.wasm:/etc/envoy/wasm.wasm \ + -e MATCH_IP="$(kubectl -n "${SHARINGIO_PAIR_NAME}" get pod "${SHARINGIO_PAIR_NAME}-humacs-0" -o=jsonpath='{.status.podIP}')" \ + envoyproxy/envoy-distroless:v1.20.0 \ + -c /etc/envoy/envoy.yaml +#+end_src + +* Deploying in Kubernetes +Create container image +#+begin_src dockerfile :tangle Dockerfile +FROM tinygo/tinygo:0.20.0 AS build +WORKDIR /app 
+COPY go.* *.go /app/ +RUN tinygo build -o wasm.wasm -scheduler=none -target=wasi /app/main.go + +FROM envoyproxy/envoy-distroless:v1.20.0 +COPY --from=build /app/wasm.wasm /etc/envoy/wasm.wasm +#+end_src + +Build the container image +#+begin_src tmate :window build-wasm +docker build -t envoy-with-registry-k8s-io-wasm . +#+end_src + +Push the image out to the other nodes +#+begin_src shell +for NODE_IP in $(kubectl get nodes -l node-role.kubernetes.io/control-plane!='' -o=jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'); do + printf "${NODE_IP} :: " + docker save envoy-with-registry-k8s-io-wasm:latest | ssh "root@${NODE_IP}" docker load +done +#+end_src + +Create a namespace +#+begin_src shell +kubectl create namespace k8s-reg-envoy-wasm -o yaml --dry-run=client | \ + kubectl apply -f - +kubectl label namespace k8s-reg-envoy-wasm cert-manager-tls=sync --overwrite +#+end_src + +#+RESULTS: +#+begin_example +namespace/k8s-reg-envoy-wasm created +namespace/k8s-reg-envoy-wasm labeled +#+end_example + +Create a ConfigMap for the config +#+BEGIN_SRC shell :results silent +kubectl -n k8s-reg-envoy-wasm create configmap config --from-file=envoy\.yaml=./envoy-config.yaml --dry-run=client -o yaml | kubectl apply -f - +#+END_SRC + +Configuring Envoy +#+BEGIN_SRC yaml :tangle ./envoy.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: k8s-reg-envoy-wasm + name: k8s-reg-envoy-wasm +spec: + replicas: 10 + selector: + matchLabels: + app: k8s-reg-envoy-wasm + template: + metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: k8s-reg-envoy-wasm + spec: + containers: + - name: envoy + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + env: + - name: MATCH_IP + value: ${MATCH_IP} + args: + - -c + - /etc/envoy/envoy.yaml + image: envoy-with-registry-k8s-io-wasm:latest + imagePullPolicy: Never + 
volumeMounts: + - name: config + mountPath: /etc/envoy/envoy.yaml + subPath: envoy.yaml + ports: + - name: http + containerPort: 10000 + volumes: + - name: config + configMap: + name: config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: k8s-reg-envoy-wasm + name: k8s-reg-envoy-wasm +spec: + ports: + - name: wasm + port: 10000 + protocol: TCP + targetPort: 10000 + selector: + app: k8s-reg-envoy-wasm + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: k8s-reg-envoy-wasm +spec: + rules: + - host: k8s-reg-envoy-wasm.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: k8s-reg-envoy-wasm + port: + number: 10000 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - k8s-reg-envoy-wasm.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod +#+END_SRC + +#+BEGIN_SRC shell :results silent +export \ + ENVOY_LAST_CFG=$(cat Dockerfile main.go envoy-config.yaml wasm.wasm | sha256sum) \ + MATCH_IP="$(kubectl -n "${SHARINGIO_PAIR_NAME}" get pod "${SHARINGIO_PAIR_NAME}-humacs-0" -o=jsonpath='{.status.podIP}')" +envsubst < envoy.yaml | kubectl -n k8s-reg-envoy-wasm apply -f - +#+END_SRC + +* Notes and references +- https://tufin.medium.com/extending-envoy-proxy-with-golang-webassembly-e51202809ba6 +- https://github.com/mstrYoda/envoy-proxy-wasm-filter-golang/blob/master/main.go +- https://github.com/tetratelabs/proxy-wasm-go-sdk/blob/main/examples/http_routing/main.go diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/envoy-config.yaml b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/envoy-config.yaml new file mode 100644 index 0000000..3481346 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/envoy-config.yaml @@ -0,0 +1,66 @@ +# Configure + +static_resources: + listeners: + - name: main + address: + socket_address: + address: 0.0.0.0 + port_value: 
10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.wasm + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + config: + name: "wasm" + root_id: "wasm_root" + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + { + } + vm_config: + runtime: "envoy.wasm.runtime.v8" + vm_id: "wasm_vm" + code: + local: + filename: "/etc/envoy/wasm.wasm" + environment_variables: + host_env_keys: + - MATCH_IP + key_values: + AUTHORITY: web_service + - name: envoy.filters.http.router + typed_config: {} + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: k8s.io + port_value: 443 diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/envoy.yaml b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/envoy.yaml new file mode 100644 index 0000000..44b9766 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/envoy.yaml @@ -0,0 +1,89 @@ + + +# Configuring Envoy + +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: k8s-reg-envoy-wasm + name: k8s-reg-envoy-wasm +spec: + replicas: 10 + selector: + matchLabels: + app: k8s-reg-envoy-wasm + template: + metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: k8s-reg-envoy-wasm + spec: 
+ containers: + - name: envoy + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + env: + - name: MATCH_IP + value: ${MATCH_IP} + args: + - -c + - /etc/envoy/envoy.yaml + image: envoy-with-registry-k8s-io-wasm:latest + imagePullPolicy: Never + volumeMounts: + - name: config + mountPath: /etc/envoy/envoy.yaml + subPath: envoy.yaml + ports: + - name: http + containerPort: 10000 + volumes: + - name: config + configMap: + name: config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: k8s-reg-envoy-wasm + name: k8s-reg-envoy-wasm +spec: + ports: + - name: wasm + port: 10000 + protocol: TCP + targetPort: 10000 + selector: + app: k8s-reg-envoy-wasm + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: k8s-reg-envoy-wasm +spec: + rules: + - host: k8s-reg-envoy-wasm.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: k8s-reg-envoy-wasm + port: + number: 10000 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - k8s-reg-envoy-wasm.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/go.mod b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/go.mod new file mode 100644 index 0000000..f519627 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/go.mod @@ -0,0 +1,5 @@ +module envoy-wasm-dynamic-host-rewriting + +go 1.17 + +require github.com/tetratelabs/proxy-wasm-go-sdk v0.14.0 diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/go.sum b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/go.sum new file mode 100644 index 0000000..d4c5962 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/go.sum @@ -0,0 +1,12 @@ 
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tetratelabs/proxy-wasm-go-sdk v0.14.0 h1:bZz1YG4sfQSyLuzJJz8cI8akIURgoSG5WSq57joAVto= +github.com/tetratelabs/proxy-wasm-go-sdk v0.14.0/go.mod h1:qZ+4i6e2wHlhnhgpH0VG4QFzqd2BEvQbQFU0npt2e2k= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/main.go b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/main.go new file mode 100644 index 0000000..ed37645 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/envoy-wasm-dynamic-host-rewriting/main.go @@ -0,0 +1,93 @@ +// The code + +package main + +import ( + "fmt" + "os" + + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm" + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types" +) + +const ( + realIPKey = "x-real-ip" + matchIPKey = "MATCH_IP" + locationKey = "location" + authorityKey = ":authority" + statusKey = ":status" + pathKey = ":path" + statusCode = 302 + defaultHost = "k8s.gcr.io" + rewriteHost = "registry-1.docker.io" +) + +var ( + authority = os.Getenv("AUTHORITY") + matchIP = 
os.Getenv(matchIPKey) +) + +func main() { + proxywasm.SetVMContext(&vmContext{}) +} + +type vmContext struct { + // Embed the default VM context here, + // so that we don't need to reimplement all the methods. + types.DefaultVMContext +} + +// Override types.DefaultVMContext. +func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext { + return &pluginContext{} +} + +type pluginContext struct { + // Embed the default plugin context here, + // so that we don't need to reimplement all the methods. + types.DefaultPluginContext +} + +// Override types.DefaultPluginContext. +func (*pluginContext) NewHttpContext(contextID uint32) types.HttpContext { + return &httpRouting{} +} + +type httpRouting struct { + // Embed the default http context here, + // so that we don't need to reimplement all the methods. + types.DefaultHttpContext + bodySize int + endOfStream bool +} + +func (ctx *pluginContext) OnPluginStart(pluginConfigurationSize int) types.OnPluginStartStatus { + return types.OnPluginStartStatusOK +} + +// Override types.DefaultHttpContext. 
+func (ctx *httpRouting) OnHttpRequestHeaders(numHeaders int, endOfStream bool) types.Action { + host := defaultHost + remoteAddr, err := proxywasm.GetHttpRequestHeader(realIPKey) + if err != nil { + proxywasm.LogCritical(fmt.Sprintf("Error: getting request header: '%v'", realIPKey)) + } + if matchIP == remoteAddr { + host = rewriteHost + } + + path, _ := proxywasm.GetHttpRequestHeader(pathKey) + body := fmt.Sprintf(`%v.`, host, path, statusCode) + if err := proxywasm.SendHttpResponse(statusCode, [][2]string{ + {authorityKey, authority}, + {locationKey, fmt.Sprintf("https://%v%v", host, path)}, + {statusKey, fmt.Sprintf("%s", statusCode)}, + {pathKey, path}, + }, []byte(body)); err != nil { + proxywasm.LogErrorf("Error: sending http response: %v", err) + proxywasm.ResumeHttpRequest() + } + return types.ActionPause +} + +func (ctx *pluginContext) OnTick() {} diff --git a/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/README.org b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/README.org new file mode 100644 index 0000000..06a538d --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/README.org @@ -0,0 +1,599 @@ +#+TITLE: Load testing the candidates + +Pair instance settings + +| Env | Value | +|------------------------------------------+---------------| +| __SHARINGIO_NO_GITHUB_TOKEN | false | +| __SHARINGIO_PAIR_NODE_SIZE | m1.xlarge.x86 | +| __SHARINGIO_PAIR_KUBERNETES_WORKER_NODES | 2 | + +* Preparing +** ArtifactServer +#+begin_src tmate :window registry-a-prepare +git-clone-structured https://github.com/kubernetes/k8s.io +cd ~/kubernetes/k8s.io +git remote add ii https://github.com/ii/k8s.io +git fetch ii +git checkout update-artifactserver-with-conditions-and-config-file +#+end_src + +Prepare a container image +#+begin_src dockerfile :tangle ~/kubernetes/k8s.io/artifactserver/Dockerfile +FROM golang:1.17.0-alpine3.14 AS build +WORKDIR /app +COPY cmd /app/cmd 
+COPY go.* *.go /app/ +ARG GOARCH="" +RUN CGO_ENABLED=0 GOOS=linux GOARCH="$GOARCH" go build \ + -a \ + -installsuffix cgo \ + -ldflags "-extldflags '-static' -s -w" \ + -o bin/artifactserver \ + cmd/artifactserver/main.go + +FROM alpine:3.14 AS extras +RUN apk add --no-cache tzdata ca-certificates +RUN adduser -D user + +FROM scratch AS final +WORKDIR /app +ENV PATH=/app \ + APP_DIST_FOLDER=./dist +COPY --from=build /app/bin/artifactserver /app/bin/artifactserver +COPY --from=extras /etc/passwd /etc/passwd +COPY --from=extras /etc/group /etc/group +COPY --from=extras /usr/share/zoneinfo /usr/share/zoneinfo +COPY --from=extras /etc/ssl /etc/ssl +EXPOSE 8080 +USER user +ENTRYPOINT ["/app/bin/artifactserver"] +#+end_src + +Build the container image +#+begin_src tmate :window registry-a :dir ~/kubernetes/k8s.io/artifactserver/ +docker build -t artifactserver . +#+end_src + +Push the image out to the other nodes +#+begin_src tmate :window registry-a +for NODE_IP in $(kubectl get nodes -l node-role.kubernetes.io/control-plane!='' -o=jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'); do + printf "${NODE_IP} :: " + docker save artifactserver:latest | ssh "root@${NODE_IP}" docker load +done +#+end_src + +Configure ArtifactServer +#+begin_src yaml :tangle artifactserver-config.yaml +backends: + kops: + host: kubeupv2.s3.amazonaws.com + conditions: + paths: + - /kops/ + local-distribution: + host: registry-1.docker.io + conditions: + headers: + X-Real-Ip: + - ${HUMACS_POD_IP} + k8s.gcr.io: + host: k8s.gcr.io +#+end_src + +Configure the Kubernetes deployment +#+begin_src yaml :tangle ./artifactserver.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: k8s-reg-artifactserver + labels: + cert-manager-tls: sync +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: k8s-reg-artifactserver + name: k8s-reg-artifactserver + labels: + app: k8s-reg-artifactserver +spec: + replicas: 10 + selector: + matchLabels: + app: k8s-reg-artifactserver + 
template: + metadata: + labels: + app: k8s-reg-artifactserver + annotations: + lastcfg: | + ${ARTIFACTSERVER_LAST_CFG} + spec: + terminationGracePeriodSeconds: 30 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - artifactserver + topologyKey: "kubernetes.io/hostname" + containers: + - name: k8s-reg-artifactserver + image: artifactserver:latest + imagePullPolicy: Never + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + args: + - --config=/etc/artifactserver/config.yaml + ports: + - containerPort: 8080 + protocol: TCP + volumeMounts: + - name: config + mountPath: /etc/artifactserver + resources: + requests: + cpu: 0.1 + memory: 256Mi + limits: + memory: 256Mi + volumes: + - name: config + configMap: + name: k8s-reg-artifactserver +--- +apiVersion: v1 +kind: Service +metadata: + namespace: k8s-reg-artifactserver + name: k8s-reg-artifactserver + labels: + app: k8s-reg-artifactserver +spec: + selector: + app: k8s-reg-artifactserver + type: NodePort + ports: + - name: http + port: 8080 + targetPort: 8080 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: k8s-reg-artifactserver + namespace: k8s-reg-artifactserver +spec: + rules: + - host: k8s-reg-artifactserver.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: k8s-reg-artifactserver + port: + number: 8080 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - k8s-reg-artifactserver.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod +#+end_src + +Apply the manifests +#+begin_src shell +export ARTIFACTSERVER_LAST_CFG=$(cat artifactserver-config.yaml | sha256sum) +envsubst < ./artifactserver.yaml | kubectl apply -f - +export HUMACS_POD_IP=$(kubectl get pods $(hostname) -o=jsonpath='{.status.podIP}') +kubectl -n k8s-reg-artifactserver 
create configmap k8s-reg-artifactserver \ + --from-file=config\.yaml=<(envsubst < artifactserver-config.yaml) \ + -o yaml --dry-run=client \ + | kubectl apply -f - +#+end_src + +#+RESULTS: +#+begin_example +namespace/k8s-reg-artifactserver created +deployment.apps/k8s-reg-artifactserver created +service/k8s-reg-artifactserver created +ingress.networking.k8s.io/k8s-reg-artifactserver created +configmap/k8s-reg-artifactserver created +#+end_example + +Test it from the Service +#+begin_src shell +curl -v http://k8s-reg-artifactserver.artifactserver:8080 2>&1 +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +** Envoy +Prepare a container image for the reg-host-authority +#+begin_src dockerfile :tangle ~/ii/org/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority/Dockerfile +FROM golang:1.17.0-alpine3.14 AS build +WORKDIR /app +COPY main.go /app/ +COPY go.* *.go /app/ +ARG GOARCH="" +RUN CGO_ENABLED=0 GOOS=linux GOARCH="$GOARCH" go build \ + -a \ + -installsuffix cgo \ + -ldflags "-extldflags '-static' -s -w" \ + -o bin/reg-host-authority \ + main.go + +FROM alpine:3.14 AS extras +RUN apk add --no-cache tzdata ca-certificates +RUN adduser -D user + +FROM scratch AS final +WORKDIR /app +ENV PATH=/app \ + APP_DIST_FOLDER=./dist +COPY --from=build /app/bin/reg-host-authority /app/bin/reg-host-authority +COPY --from=extras /etc/passwd /etc/passwd +COPY --from=extras /etc/group /etc/group +COPY --from=extras /usr/share/zoneinfo /usr/share/zoneinfo +COPY --from=extras /etc/ssl /etc/ssl +EXPOSE 8080 +USER user +ENTRYPOINT ["/app/bin/reg-host-authority"] +#+end_src + +Build the container image +#+begin_src tmate :window registry-e :dir ~/ii/org/research/k8s-infra-registry-artifacts-migration/envoy-dynamic-host-rewriting/reg-host-authority +docker build -t reg-host-authority . 
+#+end_src + +Push the image out to the other nodes +#+begin_src shell +for NODE_IP in $(kubectl get nodes -l node-role.kubernetes.io/control-plane!='' -o=jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'); do + printf "${NODE_IP} :: " + docker save reg-host-authority:latest | ssh "root@${NODE_IP}" docker load +done +#+end_src + +#+RESULTS: +#+begin_example +145.40.67.1 :: Loaded image: reg-host-authority:latest +#+end_example +Prepare the envoy configuration +#+begin_src yaml :tangle ./envoy-config.yaml +static_resources: + listeners: + - name: main + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + function envoy_on_request(request_handle) + remoteAddr = request_handle:headers():get("x-real-ip") + local headers, body = request_handle:httpCall( + "reg-host-authority", + { + [":method"] = "GET", + [":path"] = "/", + [":authority"] = "humacs", + ["X-Real-Ip"] = remoteAddr + }, + remoteAddr, + 5000 + ) + reg = body + if request_handle:headers():get(":method") == "GET" then + request_handle:respond( + { + [":status"] = "302", + ["location"] = "https://"..reg..request_handle:headers():get(":path"), + ["Content-Type"] = "text/html; charset=utf-8", + [":authority"] = "web_service" + }, + ''.."302"..".\n") + end + end + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + 
lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: k8s.io + port_value: 443 + - name: reg-host-authority + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: humacs + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: reg-host-authority + port_value: 8080 +#+end_src + +Configure the Kubernetes deployment +#+begin_src yaml :tangle ./envoy-reg-host-authority.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: reg-host-authority + name: reg-host-authority + namespace: k8s-reg-envoy-lua-go +spec: + replicas: 4 + selector: + matchLabels: + app: reg-host-authority + template: + metadata: + labels: + app: reg-host-authority + spec: + containers: + - name: envoy + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + image: reg-host-authority:latest + imagePullPolicy: Never + ports: + - name: http + containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: reg-host-authority + name: reg-host-authority + namespace: k8s-reg-envoy-lua-go +spec: + type: ClusterIP + ports: + - name: registry-k8s-io + port: 8080 + protocol: TCP + targetPort: 8080 + selector: + app: reg-host-authority +#+end_src +#+begin_src yaml :tangle ./envoy.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: k8s-reg-envoy-lua-go + labels: + cert-manager-tls: sync +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: k8s-reg-envoy-lua-go + name: k8s-reg-envoy-lua-go + namespace: k8s-reg-envoy-lua-go +spec: + replicas: 10 + selector: + matchLabels: + app: k8s-reg-envoy-lua-go + template: + metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: k8s-reg-envoy-lua-go + spec: + containers: + - name: envoy + securityContext: + 
runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + args: + - -c + - /etc/envoy/envoy.yaml + image: envoyproxy/envoy-distroless:v1.20.0 + volumeMounts: + - name: config + mountPath: /etc/envoy/envoy.yaml + subPath: envoy.yaml + ports: + - name: http + containerPort: 10000 + volumes: + - name: config + configMap: + name: envoy-config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: k8s-reg-envoy-lua-go + name: k8s-reg-envoy-lua-go + namespace: k8s-reg-envoy-lua-go +spec: + ports: + - name: registry-k8s-io + port: 10000 + protocol: TCP + targetPort: 10000 + selector: + app: k8s-reg-envoy-lua-go + type: NodePort +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: k8s-reg-envoy-lua-go + namespace: k8s-reg-envoy-lua-go +spec: + rules: + - host: envoy.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: k8s-reg-envoy-lua-go + port: + number: 10000 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - k8s-reg-envoy-lua-go.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod +#+end_src + +Apply the Envoy manifests +#+BEGIN_SRC shell +export ENVOY_LAST_CFG=$(cat envoy-config.yaml | sha256sum) +envsubst < envoy.yaml | kubectl apply -f - +kubectl -n k8s-reg-envoy-lua-go apply -f envoy-reg-host-authority.yaml +kubectl -n k8s-reg-envoy-lua-go create configmap envoy-config --from-file=envoy\.yaml=envoy-config.yaml --dry-run=client -o yaml | kubectl apply -f - +#+END_SRC + +#+RESULTS: +#+begin_example +namespace/k8s-reg-envoy-lua-go unchanged +deployment.apps/k8s-reg-envoy-lua-go unchanged +service/k8s-reg-envoy-lua-go unchanged +ingress.networking.k8s.io/k8s-reg-envoy-lua-go unchanged +deployment.apps/reg-host-authority created +service/reg-host-authority created +configmap/envoy-config created +#+end_example + +Test it from the Service +#+begin_src shell +curl -v http://reg-host-authority.k8s-reg-envoy-lua-go:8080 2>&1 +#+end_src + +#+RESULTS: 
+#+begin_example + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0* Trying 10.106.186.118:8080... +,* TCP_NODELAY set +,* Connected to reg-host-authority.k8s-reg-envoy-lua-go (10.106.186.118) port 8080 (#0) +> GET / HTTP/1.1 +> Host: reg-host-authority.k8s-reg-envoy-lua-go:8080 +> User-Agent: curl/7.68.0 +> Accept: */* +> +,* Mark bundle as not supporting multiuse +< HTTP/1.1 200 OK +< Date: Thu, 21 Oct 2021 19:58:13 GMT +< Content-Length: 20 +< Content-Type: text/plain; charset=utf-8 +< +{ [20 bytes data] + 100 20 100 20 0 0 6666 0 --:--:-- --:--:-- --:--:-- 6666 +,* Connection #0 to host reg-host-authority.k8s-reg-envoy-lua-go left intact +registry-1.docker.io +#+end_example + +#+begin_src shell +curl -v http://k8s-reg-envoy-lua-go.k8s-reg-envoy-lua-go:10000 2>&1 +#+end_src + +#+RESULTS: +#+begin_example + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0* Trying 10.105.25.30:10000... +,* TCP_NODELAY set +,* Connected to k8s-reg-envoy-lua-go.k8s-reg-envoy-lua-go (10.105.25.30) port 10000 (#0) +> GET / HTTP/1.1 +> Host: k8s-reg-envoy-lua-go.k8s-reg-envoy-lua-go:10000 +> User-Agent: curl/7.68.0 +> Accept: */* +> +,* Mark bundle as not supporting multiuse +< HTTP/1.1 302 Found +< host: web_service +< content-type: text/html; charset=utf-8 +< location: https://registry-1.docker.io/ +< content-length: 49 +< date: Thu, 21 Oct 2021 19:58:39 GMT +< server: envoy +< +{ [49 bytes data] + 100 49 100 49 0 0 8166 0 --:--:-- --:--:-- --:--:-- 8166 +,* Connection #0 to host k8s-reg-envoy-lua-go.k8s-reg-envoy-lua-go left intact +302. 
+#+end_example + +* Load Testing +#+begin_src javascript :tangle ./k6-artifactserver.js +import http from "k6/http"; +import { check, sleep } from "k6"; + +export let options = { + stages: [{ duration: "10m", target: 1 * 1000 * 1000 }], + maxRedirects: 0, +}; + +export default function () { + let res = http.get("https://artifactserver.bobymcbobs.pair.sharing.io"); + check(res, { "status was 302": (r) => r.status == 302 }); + // console.log(JSON.stringify(res)); + sleep(1); +} +#+end_src diff --git a/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/artifactserver-config.yaml b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/artifactserver-config.yaml new file mode 100644 index 0000000..7857c26 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/artifactserver-config.yaml @@ -0,0 +1,18 @@ + + +# Configure ArtifactServer + +backends: + kops: + host: kubeupv2.s3.amazonaws.com + conditions: + paths: + - /kops/ + local-distribution: + host: registry-1.docker.io + conditions: + headers: + X-Real-Ip: + - ${HUMACS_POD_IP} + k8s.gcr.io: + host: k8s.gcr.io diff --git a/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/artifactserver.yaml b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/artifactserver.yaml new file mode 100644 index 0000000..efafd0f --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/artifactserver.yaml @@ -0,0 +1,109 @@ + + +# Configure the Kubernetes deployment + +apiVersion: v1 +kind: Namespace +metadata: + name: k8s-reg-artifactserver + labels: + cert-manager-tls: sync +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: k8s-reg-artifactserver + name: k8s-reg-artifactserver + labels: + app: k8s-reg-artifactserver +spec: + replicas: 10 + selector: + matchLabels: + app: k8s-reg-artifactserver + template: + metadata: + labels: + app: 
k8s-reg-artifactserver + annotations: + lastcfg: | + ${ARTIFACTSERVER_LAST_CFG} + spec: + terminationGracePeriodSeconds: 30 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - artifactserver + topologyKey: "kubernetes.io/hostname" + containers: + - name: k8s-reg-artifactserver + image: artifactserver:latest + imagePullPolicy: Never + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + args: + - --config=/etc/artifactserver/config.yaml + ports: + - containerPort: 8080 + protocol: TCP + volumeMounts: + - name: config + mountPath: /etc/artifactserver + resources: + requests: + cpu: 0.1 + memory: 256Mi + limits: + memory: 256Mi + volumes: + - name: config + configMap: + name: k8s-reg-artifactserver +--- +apiVersion: v1 +kind: Service +metadata: + namespace: k8s-reg-artifactserver + name: k8s-reg-artifactserver + labels: + app: k8s-reg-artifactserver +spec: + selector: + app: k8s-reg-artifactserver + type: NodePort + ports: + - name: http + port: 8080 + targetPort: 8080 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: k8s-reg-artifactserver + namespace: k8s-reg-artifactserver +spec: + rules: + - host: k8s-reg-artifactserver.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: k8s-reg-artifactserver + port: + number: 8080 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - k8s-reg-artifactserver.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod diff --git a/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy-config.yaml b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy-config.yaml new file mode 100644 index 0000000..772559d --- /dev/null +++ 
b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy-config.yaml @@ -0,0 +1,93 @@ + + +# #+RESULTS: +# #+begin_example +# 145.40.67.1 :: Loaded image: reg-host-authority:latest +# #+end_example +# Prepare the envoy configuration + +static_resources: + listeners: + - name: main + address: + socket_address: + address: 0.0.0.0 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + codec_type: auto + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: web_service + http_filters: + - name: envoy.filters.http.lua + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inline_code: | + function envoy_on_request(request_handle) + remoteAddr = request_handle:headers():get("x-real-ip") + local headers, body = request_handle:httpCall( + "reg-host-authority", + { + [":method"] = "GET", + [":path"] = "/", + [":authority"] = "humacs", + ["X-Real-Ip"] = remoteAddr + }, + remoteAddr, + 5000 + ) + reg = body + if request_handle:headers():get(":method") == "GET" then + request_handle:respond( + { + [":status"] = "302", + ["location"] = "https://"..reg..request_handle:headers():get(":path"), + ["Content-Type"] = "text/html; charset=utf-8", + [":authority"] = "web_service" + }, + ''.."302"..".\n") + end + end + - name: envoy.filters.http.router + typed_config: {} + + clusters: + - name: web_service + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + load_assignment: + cluster_name: web_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: k8s.io + port_value: 443 + - name: reg-host-authority + connect_timeout: 0.25s + type: LOGICAL_DNS + lb_policy: round_robin + 
load_assignment: + cluster_name: humacs + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: reg-host-authority + port_value: 8080 diff --git a/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy-reg-host-authority.yaml b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy-reg-host-authority.yaml new file mode 100644 index 0000000..2be191c --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy-reg-host-authority.yaml @@ -0,0 +1,50 @@ + + +# Configure the Kubernetes deployment + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: reg-host-authority + name: reg-host-authority + namespace: k8s-reg-envoy-lua-go +spec: + replicas: 4 + selector: + matchLabels: + app: reg-host-authority + template: + metadata: + labels: + app: reg-host-authority + spec: + containers: + - name: envoy + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + image: reg-host-authority:latest + imagePullPolicy: Never + ports: + - name: http + containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: reg-host-authority + name: reg-host-authority + namespace: k8s-reg-envoy-lua-go +spec: + type: ClusterIP + ports: + - name: registry-k8s-io + port: 8080 + protocol: TCP + targetPort: 8080 + selector: + app: reg-host-authority diff --git a/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy.yaml b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy.yaml new file mode 100644 index 0000000..def44d5 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/envoy.yaml @@ -0,0 +1,91 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: k8s-reg-envoy-lua-go + labels: + cert-manager-tls: sync +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + 
lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: k8s-reg-envoy-lua-go + name: k8s-reg-envoy-lua-go + namespace: k8s-reg-envoy-lua-go +spec: + replicas: 10 + selector: + matchLabels: + app: k8s-reg-envoy-lua-go + template: + metadata: + annotations: + lastcfg: | + ${ENVOY_LAST_CFG} + labels: + app: k8s-reg-envoy-lua-go + spec: + containers: + - name: envoy + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + args: + - -c + - /etc/envoy/envoy.yaml + image: envoyproxy/envoy-distroless:v1.20.0 + volumeMounts: + - name: config + mountPath: /etc/envoy/envoy.yaml + subPath: envoy.yaml + ports: + - name: http + containerPort: 10000 + volumes: + - name: config + configMap: + name: envoy-config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: k8s-reg-envoy-lua-go + name: k8s-reg-envoy-lua-go + namespace: k8s-reg-envoy-lua-go +spec: + ports: + - name: registry-k8s-io + port: 10000 + protocol: TCP + targetPort: 10000 + selector: + app: k8s-reg-envoy-lua-go + type: NodePort +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: k8s-reg-envoy-lua-go + namespace: k8s-reg-envoy-lua-go +spec: + rules: + - host: envoy.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: k8s-reg-envoy-lua-go + port: + number: 10000 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - k8s-reg-envoy-lua-go.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod diff --git a/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/k6-artifactserver.js b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/k6-artifactserver.js new file mode 100644 index 0000000..4969efd --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/load-testing-the-candidates/k6-artifactserver.js @@ -0,0 +1,16 @@ +// Load Testing + +import http from "k6/http"; +import { check, sleep } from "k6"; + +export let options = { + 
stages: [{ duration: "10m", target: 1 * 1000 * 1000 }], + maxRedirects: 0, +}; + +export default function () { + let res = http.get("https://artifactserver.bobymcbobs.pair.sharing.io"); + check(res, { "status was 302": (r) => r.status == 302 }); + // console.log(JSON.stringify(res)); + sleep(1); +} diff --git a/research/k8s-infra-registry-artifacts-migration/match-ip-to-iprange/README.org b/research/k8s-infra-registry-artifacts-migration/match-ip-to-iprange/README.org new file mode 100644 index 0000000..16a5464 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/match-ip-to-iprange/README.org @@ -0,0 +1,494 @@ +#+TITLE: Match IP to IP range + +* Bringing up Postgres +** Secrets +#+name: postgres-secret +#+begin_src yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres-configuration +stringData: + POSTGRES_DB: postgres + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + PGDATABASE: postgres + PGUSER: postgres +#+end_src +** Deployment +#+name: postgres-deployment +#+begin_src yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-pvc + labels: + app: postgres +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres + labels: + app: postgres +spec: + replicas: 1 + serviceName: "postgres" + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + restartPolicy: Always + containers: + - name: postgres + image: docker.io/postgres:12.2-alpine + securityContext: + readOnlyRootFilesystem: true + runAsUser: 70 + runAsGroup: 70 + allowPrivilegeEscalation: false + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + - name: var-run-postgresql + mountPath: /var/run/postgresql + - name: tmp + mountPath: /tmp + ports: + - containerPort: 5432 + livenessProbe: + exec: + command: + - "sh" + - "-c" + - "pg_isready" + - "-U" + - "$POSTGRES_USER" + 
failureThreshold: 5 + periodSeconds: 10 + timeoutSeconds: 5 + env: + - name: POSTGRES_DB + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_DB + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_PASSWORD + - name: PGDATABASE + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGDATABASE + - name: PGUSER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGUSER + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + initContainers: + - name: postgres-db-permissions-fix + image: alpine:3.12 + command: + - /bin/sh + - -c + - "/bin/chown -R 70:70 /var/lib/postgresql/data" + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + volumes: + - name: var-lib-postgresql + persistentVolumeClaim: + claimName: postgres-pvc + - name: var-run-postgresql + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - name: "5432" + port: 5432 + targetPort: 5432 +#+end_src +** Deploying +#+begin_src shell :noweb yes +kubectl apply -f - << EOF +<> +--- +<> +EOF +#+end_src + +#+RESULTS: +#+begin_example +secret/postgres-configuration created +persistentvolumeclaim/postgres-pvc created +statefulset.apps/postgres created +service/postgres created +#+end_example + +* Setting local vars +Use /C-c C-v s/ to execute the following blocks +** Defaults for sql-mode blocks +#+begin_src elisp :results silent +(set (make-local-variable 'org-babel-default-header-args:sql-mode) + ;; Set up all sql-mode blocks to be postgres and literate + '((:results . "replace code") + (:product . "postgres") + (:session . "none") + (:noweb . "yes") + (:comments . "no") + (:wrap . 
"SRC example"))) +#+end_src + +** Default for connecting to sql-mode +#+begin_src elisp :results silent +(set (make-local-variable 'sql-server) "postgres") +(set (make-local-variable 'sql-port) 5432) +(set (make-local-variable 'sql-user) "postgres") +(set (make-local-variable 'sql-database) "postgres") +(set (make-local-variable 'sql-product) '(quote postgres)) +#+end_src + +** Default for creating new sql-mode connections +#+begin_src elisp :results silent +(set (make-local-variable 'sql-connection-alist) + (list + ;; setting these allows for the connection to be + ;; created on the fly + (list 'none + (list 'sql-product '(quote postgres)) + (list 'sql-user sql-user) + (list 'sql-database sql-database) + (list 'sql-port sql-port) + (list 'sql-server sql-server)))) +#+end_src + + +* Connecting to Postgres +Connection string: =postgres://postgres:password@postgres/postgres= + +* Sign in to Google auth +Login to gcloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC + +Set the project +#+BEGIN_SRC tmate :window prepare +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC + +Login as application +#+begin_src tmate :window prepare +gcloud auth application-default login +#+end_src + +* Prepare IP dataset +** usage_all_ip_only_distinct_int +Copy data to a CSV file in a bucket +#+begin_src shell :prologue "( " :epilogue " ) 2>&1 ; :" +TIMESTAMP=$(date +%Y%m%d%H%M) +echo $TIMESTAMP > /tmp/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-timestamp.txt + +bq extract \ + --destination_format CSV \ + k8s-infra-ii-sandbox:k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int \ + gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-$TIMESTAMP-*.csv +#+end_src + +#+RESULTS: +#+begin_example + +Welcome to BigQuery! This script will walk you through the +process of initializing your .bigqueryrc configuration file. + +First, we need to set up your credentials if they do not +already exist. 
+ +Credential creation complete. Now we will select a default project. + +List of projects: + # projectId friendlyName + --- ------------------------------ ------------------------------ + 1 apisnoop apisnoop + 2 k8s-artifacts-prod k8s-artifacts-prod + 3 k8s-cip-test-prod k8s-cip-test-prod + 4 k8s-infra-e2e-scale-project k8s-infra-e2e-scale-project + 5 k8s-infra-ii-sandbox k8s-infra-ii-sandbox + 6 k8s-infra-prow-build k8s-infra-prow-build + 7 k8s-infra-prow-build-trusted k8s-infra-prow-build-trusted + 8 k8s-infra-public-pii k8s-infra-public-pii + 9 kubernetes-public kubernetes-public +Found multiple projects. Please enter a selection for +which should be the default, or leave blank to not +set a default. + +Enter a selection (1 - 9): Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (0s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (1s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (2s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (3s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (4s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (5s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (6s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (7s) Current status: RUNNING Waiting on bqjob_r4f2d7a96d94d571d_0000017a0878d18d_1 ... (7s) Current status: DONE + +Got EOF; exiting. Is your input from a terminal? +#+end_example + +List csv files +#+begin_src shell +TIMESTAMP=$(cat /tmp/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-timestamp.txt | tr -d '\n') +gsutil ls gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-$TIMESTAMP-*.csv | head +echo "..." 
+printf "Total: " +gsutil ls gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-$TIMESTAMP-*.csv | wc -l +#+end_src + +#+RESULTS: +#+begin_example +gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-202106141504-000000000000.csv +... +Total: 1 +#+end_example + +Download data +#+begin_src tmate :window prepare +TIMESTAMP=$(cat /tmp/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-timestamp.txt | tr -d '\n') +mkdir -p /tmp/usage_all_ip_only/ +gsutil cp \ + gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.usage_all_ip_only_distinct_int-$TIMESTAMP-*.csv \ + /tmp/usage_all_ip_only/ +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +Merge the data +#+begin_src tmate :window prepare +cat /tmp/usage_all_ip_only/*.csv | tail +2 > /tmp/usage_all_ip_only.csv +#+end_src + +** shadow_pyasn_expanded +Copy data to a CSV file in a bucket +#+begin_src shell :prologue "( " :epilogue " ) 2>&1 ; :" +TIMESTAMP=$(date +%Y%m%d%H%M) +echo $TIMESTAMP > /tmp/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-timestamp.txt + +bq extract \ + --destination_format CSV \ + k8s-infra-ii-sandbox:k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded \ + gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-$TIMESTAMP-*.csv +#+end_src + +#+RESULTS: +#+begin_example + Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (0s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (1s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (2s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (3s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (4s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (5s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... 
(6s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (8s) Current status: RUNNING Waiting on bqjob_r74813be26a8df4cd_0000017a087f30c1_1 ... (8s) Current status: DONE +#+end_example + +List csv files +#+begin_src shell +TIMESTAMP=$(cat /tmp/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-timestamp.txt | tr -d '\n') +gsutil ls gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-$TIMESTAMP-*.csv | head +echo "..." +printf "Total: " +gsutil ls gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-$TIMESTAMP-*.csv | wc -l +#+end_src + +#+RESULTS: +#+begin_example +gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-202106141509-000000000000.csv +... +Total: 1 +#+end_example + +Download data +#+begin_src tmate :window prepare +TIMESTAMP=$(cat /tmp/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-timestamp.txt | tr -d '\n') +mkdir -p /tmp/shadow_pyasn_expanded/ +gsutil cp \ + gs://ii_bq_scratch_dump/k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded-$TIMESTAMP-*.csv \ + /tmp/shadow_pyasn_expanded/ +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +Merge the data +#+begin_src tmate :window prepare +cat /tmp/shadow_pyasn_expanded/*.csv | tail +2 > /tmp/shadow_pyasn_expanded.csv +#+end_src + +** shadow_pyasn_expanded +k8s-infra-ii-sandbox:k8s_artifacts_dataset_bb_test.shadow_pyasn_expanded + +* Create schema in Postgres +#+begin_src sql-mode +create table if not exists cust_ip ( + c_ip bigint not null +); +#+end_src + +#+RESULTS: +#+begin_SRC example +CREATE TABLE +#+end_SRC + + +#+begin_src sql-mode +create table if not exists shadow_pyasn_expanded ( + asn text, + cidr_ip cidr, + start_ip inet, + end_i inet, + start_ip_net bigint, + end_ip_1 bigint +); +#+end_src + +#+RESULTS: +#+begin_SRC example +CREATE TABLE +#+end_SRC + + + +* Insert data +#+begin_src tmate :window prepare +export PGUSER=ii; 
PGPASSWORD=DxSO4S1aUQG3dHoG8AXogt0rbm2PGc6HsVAVtSKnbsJF5bwi0CTKamGBULq6rhnu; +psql -U ii -d ii -h ii-ii-pooler.ii-db.svc.cluster.local -c "\\copy cust_ip from '/tmp/usage_all_ip_only.csv';" +#+end_src + + +#+begin_src tmate :window prepare +export PGUSER=ii; export PGPASSWORD=DxSO4S1aUQG3dHoG8AXogt0rbm2PGc6HsVAVtSKnbsJF5bwi0CTKamGBULq6rhnu; +psql -U ii -d ii -h ii-ii-pooler.ii-db.svc.cluster.local -c "\\copy shadow_pyasn_expanded from '/tmp/shadow_pyasn_expanded.csv' (DELIMITER(','));" +#+end_src + +* Discover the data +#+begin_src sql-mode +select count(*) from cust_ip; +#+end_src + +#+RESULTS: +#+begin_SRC example + count +--------- + 7417599 +(1 row) + +#+end_SRC + + +#+begin_src sql-mode +select count(*) from shadow_pyasn_expanded; +#+end_src + +#+RESULTS: +#+begin_SRC example + count +-------- + 927411 +(1 row) + +#+end_SRC + +* Add indexes to the tables + +#+begin_src sql-mode +create index on shadow_pyasn_expanded (end_ip_1); +#+end_src +#+begin_src sql-mode +create index on shadow_pyasn_expanded (start_ip_net); +#+end_src +#+begin_src sql-mode +create index on cust_ip (c_ip); +#+end_src + +* Join the data +#+begin_src sql-mode +select 1,2,3; +#+end_src + +#+RESULTS: +#+begin_SRC example + ?column? | ?column? | ?column? 
+----------+----------+---------- + 1 | 2 | 3 +(1 row) + +#+end_SRC + +#+begin_src sql-mode +SELECT +shadow_pyasn_expanded.cidr_ip, +shadow_pyasn_expanded.start_ip_net, +shadow_pyasn_expanded.end_ip_1, +shadow_pyasn_expanded.asn, +cust_ip.c_ip +FROM +shadow_pyasn_expanded, +cust_ip +WHERE +cust_ip.c_ip >= shadow_pyasn_expanded.start_ip_net +AND cust_ip.c_ip <= shadow_pyasn_expanded.end_ip_1 +LIMIT 10 +; +#+end_src + +#+begin_src sql-mode +\copy ( + SELECT + shadow_pyasn_expanded.cidr_ip, + shadow_pyasn_expanded.start_ip_net, + shadow_pyasn_expanded.end_ip_1, + shadow_pyasn_expanded.asn, + cust_ip.c_ip +FROM + shadow_pyasn_expanded, + cust_ip +WHERE + cust_ip.c_ip >= shadow_pyasn_expanded.start_ip_net +AND cust_ip.c_ip <= shadow_pyasn_expanded.end_ip_1 +) +TO + '/tmp/match-ip-to-iprange.csv' +CSV +HEADER +; +#+end_src +#+begin_src sql-mode +\copy ( SELECT shadow_pyasn_expanded.cidr_ip, shadow_pyasn_expanded.start_ip_net, shadow_pyasn_expanded.end_ip_1, shadow_pyasn_expanded.asn, cust_ip.c_ip FROM shadow_pyasn_expanded, cust_ip WHERE cust_ip.c_ip >= shadow_pyasn_expanded.start_ip_net AND cust_ip.c_ip <= shadow_pyasn_expanded.end_ip_1) TO '/tmp/match-ip-to-iprange.csv' CSV HEADER; +#+end_src + +#+RESULTS: +#+begin_SRC example +#+end_SRC + +I ended up adding indexes and that got it to complete +I have to come back to add the missing steps I did to get a successful csv. +* Upload results to bq +#+begin_src tmate :window prepare +bq load --autodetect k8s_artifacts_dataset_bb_test.match_ip_range_to_asn /tmp/match-ip-to-iprange.csv +#+end_src diff --git a/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/README.org b/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/README.org new file mode 100644 index 0000000..4df0cf1 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/README.org @@ -0,0 +1,182 @@ +#+TITLE: nginx njs host rewriting + +#+begin_quote +An nginx implementation for 
registry.k8s.io +#+end_quote + +* Setting it up +Configure nginx to use the njs script +#+begin_src conf :tangle ./nginx.conf +load_module modules/ngx_http_js_module.so; +env UPSTREAM_HOST; +events {} +pid /tmp/nginx.pid; +http { + client_body_temp_path /tmp/client_temp; + proxy_temp_path /tmp/proxy_temp_path; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + + js_include /etc/nginx/njs/http.js; + js_set $upstream_host fetch_upstream_host; + server { + listen 8012; + rewrite ^/(.*)$ https://$upstream_host/$1; + location / { + resolver 8.8.8.8; + proxy_pass https://$upstream_host/$1; + } + } +} +#+end_src + +The http response method +#+begin_src javascript :tangle ./http.js +function fetch_upstream_host(r) { + var reg = "k8s.gcr.io" + if (r.remoteAddress === process.env.MATCH_IP) { + reg = "registry-1.docker.io" + } + r.error(`registry: ${reg}`) + return reg +} +#+end_src + +* Testing in Docker +#+begin_src tmate :window nginx +docker \ + run \ + -it \ + --rm \ + -p 8012:8012 \ + -v $PWD/nginx.conf:/etc/nginx/nginx.conf \ + -v $PWD/http.js:/etc/nginx/njs/http.js \ + -e TEST="Hello from njs in nginx!" 
\ + -e MATCH_IP="172.17.0.1" \ + nginxinc/nginx-unprivileged:1.20 +#+end_src + +* Deploying in Kubernetes +Create a namespace +#+begin_src shell +kubectl create namespace k8s-reg-nginx-njs -o yaml --dry-run=client | \ + kubectl apply -f - +kubectl label namespace k8s-reg-nginx-njs cert-manager-tls=sync --overwrite +#+end_src + +#+RESULTS: +#+begin_example +namespace/k8s-reg-nginx-njs created +namespace/k8s-reg-nginx-njs labeled +#+end_example + +Create a ConfigMap for the config +#+BEGIN_SRC shell :results silent +kubectl -n k8s-reg-nginx-njs \ + create configmap config \ + --from-file=nginx\.conf=./nginx.conf \ + --from-file=http\.js=./http.js \ + --dry-run=client -o yaml | kubectl apply -f - +#+END_SRC + +Configure the nginx deployment +#+begin_src yaml :tangle ./nginx.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + lastcfg: | + ${NGINX_LAST_CFG} + labels: + app: k8s-reg-nginx-njs + name: k8s-reg-nginx-njs +spec: + replicas: 10 + selector: + matchLabels: + app: k8s-reg-nginx-njs + template: + metadata: + annotations: + lastcfg: | + ${NGINX_LAST_CFG} + labels: + app: k8s-reg-nginx-njs + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged:1.20 + securityContext: + runAsUser: 101 + runAsGroup: 101 + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + env: + - name: MATCH_IP + value: "${MATCH_IP}" + volumeMounts: + - name: config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: config + mountPath: /etc/nginx/njs/http.js + subPath: http.js + ports: + - name: http + containerPort: 8012 + volumes: + - name: config + configMap: + name: config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: k8s-reg-nginx-njs + name: k8s-reg-nginx-njs +spec: + ports: + - name: k8s-reg-nginx-njs + port: 8012 + protocol: TCP + targetPort: 8012 + selector: + app: k8s-reg-nginx-njs + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: k8s-reg-nginx-njs +spec: + 
rules: + - host: k8s-reg-nginx-njs.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: k8s-reg-nginx-njs + port: + number: 8012 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - k8s-reg-nginx-njs.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod +#+end_src + +#+BEGIN_SRC shell :results silent +export \ + NGINX_LAST_CFG=$(cat nginx.conf http.js | sha256sum) \ + MATCH_IP="$(kubectl -n "${SHARINGIO_PAIR_NAME}" get pod "${SHARINGIO_PAIR_NAME}-humacs-0" -o=jsonpath='{.status.podIP}')" +envsubst < nginx.yaml | kubectl -n k8s-reg-nginx-njs apply -f - +#+END_SRC + +* Notes and links +- https://www.rkatz.xyz/post/2021-09-13-nginx-njs-experiments/ +- https://gist.github.com/runlevel5/5d038e91ea1f874a1dd1608d4e7fcace +- https://nginx.org/en/docs/njs/node_modules.html +- https://www.digitalocean.com/community/tutorials/how-to-create-temporary-and-permanent-redirects-with-nginx diff --git a/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/http.js b/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/http.js new file mode 100644 index 0000000..a358776 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/http.js @@ -0,0 +1,12 @@ + + +// The http response method + +function fetch_upstream_host(r) { + var reg = "k8s.gcr.io" + if (r.remoteAddress === process.env.MATCH_IP) { + reg = "registry-1.docker.io" + } + r.error(`registry: ${reg}`) + return reg +} diff --git a/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/nginx.conf b/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/nginx.conf new file mode 100644 index 0000000..95d41de --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/nginx.conf @@ -0,0 +1,25 @@ +# Setting it up +# Configure nginx to use the njs script + +load_module modules/ngx_http_js_module.so; +env UPSTREAM_HOST; +events {} +pid 
/tmp/nginx.pid; +http { + client_body_temp_path /tmp/client_temp; + proxy_temp_path /tmp/proxy_temp_path; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + + js_include /etc/nginx/njs/http.js; + js_set $upstream_host fetch_upstream_host; + server { + listen 8012; + rewrite ^/(.*)$ https://$upstream_host/$1; + location / { + resolver 8.8.8.8; + proxy_pass https://$upstream_host/$1; + } + } +} diff --git a/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/nginx.yaml b/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/nginx.yaml new file mode 100644 index 0000000..27bb8f6 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/nginx-njs-host-rewriting/nginx.yaml @@ -0,0 +1,88 @@ + + +# Configure the nginx deployment + +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + lastcfg: | + ${NGINX_LAST_CFG} + labels: + app: k8s-reg-nginx-njs + name: k8s-reg-nginx-njs +spec: + replicas: 10 + selector: + matchLabels: + app: k8s-reg-nginx-njs + template: + metadata: + annotations: + lastcfg: | + ${NGINX_LAST_CFG} + labels: + app: k8s-reg-nginx-njs + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged:1.20 + securityContext: + runAsUser: 101 + runAsGroup: 101 + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + env: + - name: MATCH_IP + value: "${MATCH_IP}" + volumeMounts: + - name: config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: config + mountPath: /etc/nginx/njs/http.js + subPath: http.js + ports: + - name: http + containerPort: 8012 + volumes: + - name: config + configMap: + name: config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: k8s-reg-nginx-njs + name: k8s-reg-nginx-njs +spec: + ports: + - name: k8s-reg-nginx-njs + port: 8012 + protocol: TCP + targetPort: 8012 + selector: + app: k8s-reg-nginx-njs + type: ClusterIP +--- +apiVersion: networking.k8s.io/v1 +kind: 
Ingress +metadata: + name: k8s-reg-nginx-njs +spec: + rules: + - host: k8s-reg-nginx-njs.${SHARINGIO_PAIR_BASE_DNS_NAME} + http: + paths: + - backend: + service: + name: k8s-reg-nginx-njs + port: + number: 8012 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - k8s-reg-nginx-njs.${SHARINGIO_PAIR_BASE_DNS_NAME} + secretName: letsencrypt-prod diff --git a/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/README.org b/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/README.org new file mode 100644 index 0000000..6922104 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/README.org @@ -0,0 +1,238 @@ +#+TITLE: Setting up a transparent cache + +Hosting your own copies of Kubernetes images is a sustainable way to give back to the Kubernetes community. +By setting up a transparent proxy cache, images will be pulled from your closer OCI-compatible cache and then from /k8s.gcr.io/. + +* Deploying a proxy cache +Here are two OCI-compatible container registries that you can bring up to host container images. + +** Distribution +Define the config +#+begin_src yaml :tangle ./config.yml +version: 0.1 +log: + accesslog: + disabled: true + level: debug + fields: + service: registry + environment: development +auth: + htpasswd: + realm: basic-realm + path: /etc/docker/registry/htpasswd +storage: + delete: + enabled: true + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + secret: registry-k8s-io-registry-k8s-io + debug: + addr: :5001 + prometheus: + enabled: true + path: /metrics + headers: + X-Content-Type-Options: [nosniff] +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 +proxy: + remoteurl: https://k8s.gcr.io +#+end_src + +*** Deploy with Docker +#+begin_src shell +USERNAME=distribution +PASSWORD=Distritest1234! 
+htpasswd -Bbn $USERNAME $PASSWORD > /tmp/htpasswd +#+end_src + +#+begin_src shell :results silent +docker run -d \ + -p 5000:5000 \ + -v /tmp/htpasswd:/etc/docker/registry/htpasswd \ + -v /tmp/config.yml:/etc/docker/registry/config.yml \ + --restart always \ + --name registry-proxy-cache \ + registry:2.7.1 +#+end_src + +#+begin_src shell :results silent +docker rm -f registry-proxy-cache +#+end_src + +*** Deploy with Kubernetes + +Create the namespace +#+begin_src shell +kubectl create ns distribution +#+end_src + +Create the config +#+begin_src shell +kubectl -n distribution create configmap distribution-config --from-file=config\.yml=distribution-config.yaml --dry-run=client -o yaml | kubectl apply -f - +#+end_src + +Create the auth secret +#+begin_src shell +USERNAME=distribution +PASSWORD=Distritest1234! +kubectl -n distribution create secret generic distribution-auth --from-literal=htpasswd="$(htpasswd -Bbn $USERNAME $PASSWORD)" +#+end_src + +Define the deployment +#+begin_src yaml :tangle ./distribution.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: distribution +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: distribution-data + namespace: distribution +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distribution + namespace: distribution +spec: + replicas: 1 + selector: + matchLabels: + app: distribution + template: + metadata: + labels: + app: distribution + spec: + containers: + - name: distribution + image: registry:2.7.1 + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 10m + memory: 30Mi + requests: + cpu: 10m + memory: 30Mi + ports: + - containerPort: 5000 + env: + - name: TZ + value: "Pacific/Auckland" + volumeMounts: + - name: distribution-data + mountPath: /var/lib/registry + - name: distribution-config + mountPath: /etc/docker/registry/config.yml + subPath: config.yml + - name: distribution-auth + mountPath: 
/etc/docker/registry/htpasswd + subPath: htpasswd + readinessProbe: + tcpSocket: + port: 5000 + initialDelaySeconds: 2 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 5000 + initialDelaySeconds: 1 + periodSeconds: 20 + volumes: + - name: distribution-data + persistentVolumeClaim: + claimName: distribution-data + - name: distribution-config + configMap: + name: distribution-config + - name: distribution-auth + secret: + secretName: distribution-auth +--- +apiVersion: v1 +kind: Service +metadata: + name: distribution + namespace: distribution +spec: + ports: + - port: 5000 + targetPort: 5000 + selector: + app: distribution +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: distribution + namespace: distribution + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/proxy-body-size: "0" +spec: + tls: + - hosts: + - my-registry.mirror.host + secretName: letsencrypt-prod + rules: + - host: my-registry.mirror.host + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: distribution + port: + number: 5000 +#+end_src + +Install distribution +#+begin_src shell +kubectl apply -f distribution.yaml +#+end_src + +** Harbor +*** Deploy with the installer +https://goharbor.io/docs/2.2.0/install-config/download-installer/ +*** Deploy with Helm in Kubernetes +https://goharbor.io/docs/2.2.0/install-config/harbor-ha-helm/ + +* Deploy +** Kubeadm +#+begin_src shell +kubeadm init --image-repository="my-registry.mirror.host" +#+end_src + +** Kops +https://kops.sigs.k8s.io/cluster_spec/#registry-mirrors +https://kops.sigs.k8s.io/cluster_spec/#containerproxy +#+begin_src yaml +spec: + assets: + containerProxy: my-registry.mirror.host +#+end_src + +** ClusterAPI +Requires v1alpha4 +https://github.com/kubernetes-sigs/cluster-api/blob/af33e43/bootstrap/kubeadm/api/v1alpha4/kubeadm_types.go#L115-L120 + diff --git 
a/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/config.yml b/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/config.yml new file mode 100644 index 0000000..dab403a --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/config.yml @@ -0,0 +1,41 @@ +# Distribution +# Define the config + +version: 0.1 +log: + accesslog: + disabled: true + level: debug + fields: + service: registry + environment: development +auth: + htpasswd: + realm: basic-realm + path: /etc/docker/registry/htpasswd +storage: + delete: + enabled: true + filesystem: + rootdirectory: /var/lib/registry + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + secret: registry-k8s-io-registry-k8s-io + host: https://bobymcbobs.pair.sharing.io:5000 + debug: + addr: :5001 + prometheus: + enabled: true + path: /metrics + headers: + X-Content-Type-Options: [nosniff] +health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 +proxy: + remoteurl: https://k8s.gcr.io diff --git a/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/distribution.yaml b/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/distribution.yaml new file mode 100644 index 0000000..eb74f67 --- /dev/null +++ b/research/k8s-infra-registry-artifacts-migration/setting-up-a-transparent-cache/distribution.yaml @@ -0,0 +1,118 @@ + + +# Define the deployment + +apiVersion: v1 +kind: Namespace +metadata: + name: distribution +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: distribution-data + namespace: distribution +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distribution + namespace: distribution +spec: + replicas: 1 + selector: + matchLabels: + app: distribution + template: + metadata: + labels: + app: distribution + spec: + containers: + - 
name: distribution + image: registry:2.7.1 + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 10m + memory: 30Mi + requests: + cpu: 10m + memory: 30Mi + ports: + - containerPort: 5000 + env: + - name: TZ + value: "Pacific/Auckland" + volumeMounts: + - name: distribution-data + mountPath: /var/lib/registry + - name: distribution-config + mountPath: /etc/docker/registry/config.yml + subPath: config.yml + - name: distribution-auth + mountPath: /etc/docker/registry/htpasswd + subPath: htpasswd + readinessProbe: + tcpSocket: + port: 5000 + initialDelaySeconds: 2 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 5000 + initialDelaySeconds: 1 + periodSeconds: 20 + volumes: + - name: distribution-data + persistentVolumeClaim: + claimName: distribution-data + - name: distribution-config + configMap: + name: distribution-config + - name: distribution-auth + secret: + secretName: distribution-auth +--- +apiVersion: v1 +kind: Service +metadata: + name: distribution + namespace: distribution +spec: + ports: + - port: 5000 + targetPort: 5000 + selector: + app: distribution +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: distribution + namespace: distribution + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/proxy-body-size: "0" +spec: + tls: + - hosts: + - my-registry.mirror.host + secretName: letsencrypt-prod + rules: + - host: my-registry.mirror.host + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: distribution + port: + number: 5000 diff --git a/research/k8s-infra-wg/sandbox-infra-tf.org b/research/k8s-infra-wg/sandbox-infra-tf.org new file mode 100644 index 0000000..d91da52 --- /dev/null +++ b/research/k8s-infra-wg/sandbox-infra-tf.org @@ -0,0 +1,146 @@ +#+TITLE: Sandbox Infra Tf +The Goal is to template building of all infra in the k8s-infra-ii-sandbox project. +Starting point would be to create a list of infra we need in the project. 
+The pr that kicked this off is [infra-adit#2011](https://github.com/kubernetes/k8s.io/pull/2011) +It mentions a bunch of one off infra that will be the focus of our doc. +* List of infra we currently use in the sandbox +I have a full list of the resources in [ii/k8s-infra-dump](https://github.com/ii/k8s-infra-dump/tree/main/resource-config-bulk-export) +This is a list of services I am aware of using: +- Big query services + #+begin_example +bigqueryconnection.googleapis.com BigQuery Connection API +bigquerydatatransfer.googleapis.com BigQuery Data Transfer API +bigqueryreservation.googleapis.com BigQuery Reservation API + #+end_example + I notice the project also has these two services: + - bigquery.googleapis.com + - biquerystorage.googleapis.com + Perhaps they are default to an account with bq enabled? Will test +- BQ dataset that will be used for log analysis + Name? Current is 'k8s_artifacts_gcslogs_appspot' +- GCS buckets: + - export-destination (I do not know if this bucket is still needed) + - ii_bq_scratch_dump (This was very handy for bq testing) +- Registry? I have the terraform for this, but dont think we have an immediate need? +- Cluster setup is already in [cncf-config] (https://github.com/cncf-infra/prow-config/tree/master/infra/gcp/clusters/projects/k8s-infra-ii-sandbox) +* GCP things +First time using gcloud on this host, log in first. +#+begin_src shell +gcloud auth login +#+end_src +#+begin_src shell +gcloud auth list +#+end_src + +#+RESULTS: +#+begin_example + Credentialed Accounts +ACTIVE ACCOUNT +,* bb@ii.coop +#+end_example + +#+begin_src shell +gcloud projects list +#+end_src + +#+RESULTS: +#+begin_example +PROJECT_ID NAME PROJECT_NUMBER +apisnoop apisnoop 840466421052 +k8s-artifacts-prod k8s-artifacts-prod 388270116193 +k8s-artifacts-prod-bak k8s-artifacts-prod-bak 1057569514213 +k8s-cip-test-prod k8s-cip-test-prod 693665670941 +k8s-conform k8s-conform 228988630781 +etc....... 
+#+end_example +Ok gcloud is active on my box +* Terraform templates for bq-dataset, bucket +First the dataset +#+begin_src terraform :tangle (concat (getenv "HOME") "/terrafor_test/bq_test_dataset.tf") +resource "google_bigquery_dataset" "k8s_artifacts_dataset_bb_test" { + access { + role = "OWNER" + special_group = "projectOwners" + } + + access { + role = "OWNER" + user_by_email = "bb@ii.coop" + } + + access { + role = "READER" + special_group = "projectReaders" + } + + access { + role = "WRITER" + special_group = "projectWriters" + } + + dataset_id = "k8s_artifacts_dataset_bb_test" + delete_contents_on_destroy = false + + labels = merge({ managed-by-cnrm = "true" }) + + location = "US" + project = "k8s-infra-ii-sandbox" +} +#+end_src +Now the bucket +#+begin_src terraform :tangle (concat (getenv "HOME") "/terrafor_test/bucket_test.tf") +resource "google_storage_bucket" "k8s_artifacts_bucket_bb_test" { + force_destroy = false + + labels = merge({ managed-by-cnrm = "true" }) + + location = "US" + name = "k8s-infra-ii-sandbox-bb-test" + project = "631771264409" + storage_class = "STANDARD" +} +#+end_src +Provider +#+begin_src terraform :tangle (concat (getenv "HOME") "/terrafor_test/provider.tf") +/* +This file defines: +- Required provider versions +- Storage backend details +*/ + +terraform { + + + required_providers { + google = { + source = "hashicorp/google" + version = "~> 3.68.0" + } + google-beta = { + source = "hashicorp/google-beta" + version = "~> 3.68.0" + } + } +} + +#+end_src +Versions +#+begin_src terraform :tangle (concat (getenv "HOME") "/terrafor_test/versions.tf") +/* +This file defines: +- Required Terraform version +*/ + +terraform { + required_version = "~> 0.13" +} +#+end_src +Terraform init +#+begin_src tmate :window terraform-init :dir (concat (getenv "HOME") "/terrafor_test") +terraform init +#+end_src + +Terraform apply +#+begin_src tmate :window terraform-apply :dir (concat (getenv "HOME") "/terrafor_test") +terraform apply +#+end_src 
diff --git a/research/k8s/explore-get-apigroup-endpoints.org b/research/k8s/explore-get-apigroup-endpoints.org new file mode 100644 index 0000000..00ecc06 --- /dev/null +++ b/research/k8s/explore-get-apigroup-endpoints.org @@ -0,0 +1,257 @@ +# -*- ii:t; -*- +#+TITLE: Explore get*APIGroup Endpoints + + +* Goal + +Looking into the following endpoints so that a mock test can be created. + +#+begin_example +getApiregistrationAPIGroup +getAppsAPIGroup +getAuditregistrationAPIGroup +getAuthenticationAPIGroup +getAuthorizationAPIGroup +getAutoscalingAPIGroup +getBatchAPIGroup +getCertificatesAPIGroup +getCodeVersion +getCoordinationAPIGroup +getDiscoveryAPIGroup +getEventsAPIGroup +getExtensionsAPIGroup +getFlowcontrolApiserverAPIGroup +getNetworkingAPIGroup +getNodeAPIGroup +getPolicyAPIGroup +getRbacAuthorizationAPIGroup +getSchedulingAPIGroup +getSettingsAPIGroup +getStorageAPIGroup +#+end_example +
+- `getCodeVersion` is found at `/version` so will need to be in another test as the test focus will be the `/apis` path. 
+ +* Explore endpoints with 1.17.0 + +** Create cluster + + #+begin_src bash + kind create cluster --name k8s1-17-0 --image kindest/node:v1.17.0@sha256:9512edae126da271b66b990b6fff768fbb7cd786c7d39e86bdf55906352fdf62 + #+end_src + + #+RESULTS: + +** Cluster version + + #+begin_src bash :exports both + kubectl get nodes -o wide --context kind-k8s1-17-0 + #+end_src + + #+RESULTS: + #+begin_example + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + k8s1-17-0-control-plane Ready master 22m v1.17.0 172.18.0.2 Ubuntu 19.10 5.3.0-59-generic containerd://1.3.2 + #+end_example + +** Locate alpha or beta endpoints + + #+begin_src bash :exports both + kubectl get --context kind-k8s1-17-0 --raw /apis | jq -r ' .groups | .[].preferredVersion.groupVersion ' | egrep 'alpha|beta' | sort | nl + #+end_src + + #+RESULTS: + #+begin_example + 1 certificates.k8s.io/v1beta1 + 2 discovery.k8s.io/v1beta1 + 3 events.k8s.io/v1beta1 + 4 extensions/v1beta1 + 5 node.k8s.io/v1beta1 + 6 policy/v1beta1 + #+end_example + +** Locate stable endpoints + + #+begin_src bash :exports both + kubectl get --context kind-k8s1-17-0 --raw /apis | jq -r ' .groups | .[].preferredVersion.groupVersion ' | egrep -v 'alpha|beta' | sort | nl + #+end_src + + #+RESULTS: + #+begin_example + 1 admissionregistration.k8s.io/v1 + 2 apiextensions.k8s.io/v1 + 3 apiregistration.k8s.io/v1 + 4 apps/v1 + 5 authentication.k8s.io/v1 + 6 authorization.k8s.io/v1 + 7 autoscaling/v1 + 8 batch/v1 + 9 coordination.k8s.io/v1 + 10 networking.k8s.io/v1 + 11 rbac.authorization.k8s.io/v1 + 12 scheduling.k8s.io/v1 + 13 storage.k8s.io/v1 + #+end_example + +* Explore endpoints with 1.19.0 beta + +** Create cluster + + #+begin_src bash :results silent + kind create cluster --name k8s1-19-0b2 --image heyste/node:1.19.0-beta2 + #+end_src + +** Cluster version + + #+begin_src bash :exports both + kubectl get nodes -o wide --context kind-k8s1-19-0b2 + #+end_src + + #+RESULTS: + #+begin_example + NAME 
STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + k8s1-19-0b2-control-plane Ready master 42m v1.19.0-beta.2.18+7151131d79674d 172.18.0.3 Ubuntu 20.04 LTS 5.3.0-59-generic containerd://1.3.3-14-g449e9269 + #+end_example + +** Locate alpha or beta endpoints + + #+begin_src bash :exports both + kubectl get --context kind-k8s1-19-0b2 --raw /apis | jq -r ' .groups | .[].preferredVersion.groupVersion ' | egrep 'alpha|beta' | sort | nl + #+end_src + + #+RESULTS: + #+begin_example + 1 discovery.k8s.io/v1beta1 + 2 events.k8s.io/v1beta1 + 3 extensions/v1beta1 + 4 node.k8s.io/v1beta1 + 5 policy/v1beta1 + #+end_example + +** Locate stable endpoints + + #+begin_src bash :exports both + kubectl get --context kind-k8s1-19-0b2 --raw /apis | jq -r ' .groups | .[].preferredVersion.groupVersion ' | egrep -v 'alpha|beta' | sort | nl + #+end_src + + #+RESULTS: + #+begin_example + 1 admissionregistration.k8s.io/v1 + 2 apiextensions.k8s.io/v1 + 3 apiregistration.k8s.io/v1 + 4 apps/v1 + 5 authentication.k8s.io/v1 + 6 authorization.k8s.io/v1 + 7 autoscaling/v1 + 8 batch/v1 + 9 certificates.k8s.io/v1 + 10 coordination.k8s.io/v1 + 11 networking.k8s.io/v1 + 12 rbac.authorization.k8s.io/v1 + 13 scheduling.k8s.io/v1 + 14 storage.k8s.io/v1 + #+end_example + +* Bash version of possible mock test + + #+begin_src bash :exports both + kubectl get --context kind-k8s1-19-0b2 --raw /apis | jq -rc '.groups[]' | while IFS='' read stack + do + name=$(echo "$stack " | jq '.name') + echo "APIGroup: $name" + preferred_version=$(echo "$stack" | jq ' .preferredVersion.groupVersion' | sed 's/"//g') + echo "Preferred version: $preferred_version" + + if echo $preferred_version | egrep -v 'alpha|beta' 1>/dev/null ; then + # echo ">>> $preferred_version" + versions=$(echo "$stack" | jq -rc '.versions') + # echo ">>> $versions" + + for version in $(echo "$versions" | jq -rc ' .[] | .groupVersion') + do + # echo ">> $version <<" + # echo "## $preferred_version ##" + if [[ 
$version == $preferred_version ]] ; then + echo "MATCH !! $preferred_version" + fi + done + + fi + echo + done + #+end_src + + #+RESULTS: + #+begin_example + APIGroup: "apiregistration.k8s.io" + Preferred version: apiregistration.k8s.io/v1 + MATCH !! apiregistration.k8s.io/v1 + + APIGroup: "extensions" + Preferred version: extensions/v1beta1 + + APIGroup: "apps" + Preferred version: apps/v1 + MATCH !! apps/v1 + + APIGroup: "events.k8s.io" + Preferred version: events.k8s.io/v1beta1 + + APIGroup: "authentication.k8s.io" + Preferred version: authentication.k8s.io/v1 + MATCH !! authentication.k8s.io/v1 + + APIGroup: "authorization.k8s.io" + Preferred version: authorization.k8s.io/v1 + MATCH !! authorization.k8s.io/v1 + + APIGroup: "autoscaling" + Preferred version: autoscaling/v1 + MATCH !! autoscaling/v1 + + APIGroup: "batch" + Preferred version: batch/v1 + MATCH !! batch/v1 + + APIGroup: "certificates.k8s.io" + Preferred version: certificates.k8s.io/v1 + MATCH !! certificates.k8s.io/v1 + + APIGroup: "networking.k8s.io" + Preferred version: networking.k8s.io/v1 + MATCH !! networking.k8s.io/v1 + + APIGroup: "policy" + Preferred version: policy/v1beta1 + + APIGroup: "rbac.authorization.k8s.io" + Preferred version: rbac.authorization.k8s.io/v1 + MATCH !! rbac.authorization.k8s.io/v1 + + APIGroup: "storage.k8s.io" + Preferred version: storage.k8s.io/v1 + MATCH !! storage.k8s.io/v1 + + APIGroup: "admissionregistration.k8s.io" + Preferred version: admissionregistration.k8s.io/v1 + MATCH !! admissionregistration.k8s.io/v1 + + APIGroup: "apiextensions.k8s.io" + Preferred version: apiextensions.k8s.io/v1 + MATCH !! apiextensions.k8s.io/v1 + + APIGroup: "scheduling.k8s.io" + Preferred version: scheduling.k8s.io/v1 + MATCH !! scheduling.k8s.io/v1 + + APIGroup: "coordination.k8s.io" + Preferred version: coordination.k8s.io/v1 + MATCH !! 
coordination.k8s.io/v1 + + APIGroup: "node.k8s.io" + Preferred version: node.k8s.io/v1beta1 + + APIGroup: "discovery.k8s.io" + Preferred version: discovery.k8s.io/v1beta1 + + #+end_example diff --git a/research/k8s/explore-lifecycle-e2e-test-flakes.org b/research/k8s/explore-lifecycle-e2e-test-flakes.org new file mode 100644 index 0000000..0e732ad --- /dev/null +++ b/research/k8s/explore-lifecycle-e2e-test-flakes.org @@ -0,0 +1,774 @@ +#+TITLE: Explore Lifecycle E2e Test Flakes + + +* Summary of Lifecycle Test flakes + +After reviewing a number of e2e lifecycle tests for sig-apps, there looks to be a common issue trying to mount the service account secrets. +Each volume is created and destroyed before the pod even gets to attach the volume. +Based on the volume prefix =kube-api-access= which is located in [[https://github.com/kubernetes/kubernetes/blob/master/plugin/pkg/admission/serviceaccount/admission.go#L56-L57][serviceaccount/admission.go]], the =ServiceAccount= prefix is used for =ServiceAccountTokenSecret= [[https://github.com/kubernetes/kubernetes/blob/master/plugin/pkg/admission/serviceaccount/admission.go#L451-L477][Ref:L451-477]]. + +Due to the complexity and knowledge required to deal with these flakes it would be helpful to know which SIG is best placed to create a suitable fix. + +Flake details were gathered from the following testgrids. 
+ +- https://testgrid.k8s.io/sig-release-master-blocking#kind-master-parallel&include-filter-by-regex=should.run.*lifecycle.*&width=5 +- https://testgrid.k8s.io/sig-release-master-blocking#kind-ipv6-master-parallel&include-filter-by-regex=should.run.*lifecycle.*&width=5 + +* Flake 1: Deployment +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366153928210649088 +- namespace: deployment-2394 +- node: kind-worker +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366153928210649088/artifacts/logs/kind-worker/kubelet.log + +** Failure message (A) + +#+begin_example +�[1mSTEP�[0m: creating a Deployment +�[1mSTEP�[0m: waiting for Deployment to be created +�[1mSTEP�[0m: waiting for all Replicas to be Ready +Feb 28 22:51:10.748: INFO: observed Deployment test-deployment in namespace deployment-2394 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 28 22:51:10.748: INFO: observed Deployment test-deployment in namespace deployment-2394 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 28 22:51:10.754: INFO: observed Deployment test-deployment in namespace deployment-2394 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 28 22:51:10.754: INFO: observed Deployment test-deployment in namespace deployment-2394 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 28 22:51:10.769: INFO: observed Deployment test-deployment in namespace deployment-2394 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 28 22:51:10.769: INFO: observed Deployment test-deployment in namespace deployment-2394 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 28 22:51:10.782: INFO: observed Deployment test-deployment in namespace deployment-2394 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 28 22:51:10.782: INFO: observed Deployment test-deployment in namespace 
deployment-2394 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 28 22:52:10.748: FAIL: failed to see replicas of test-deployment in namespace deployment-2394 scale to requested amount of 2 +Unexpected error: + <*errors.errorString | 0xc00023e240>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366153928210649088/artifacts/logs/kind-worker/kubelet.log | \ + grep "deployment-2394" | grep "will not retry!" | tail -1 +#+end_src + +#+RESULTS: +#+begin_example +Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.825451 243 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-fqqvk.16680b29cf00e812", GenerateName:"", Namespace:"deployment-2394", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-2394", Name:"test-deployment-7778d6bf57-fqqvk", UID:"dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8", APIVersion:"v1", ResourceVersion:"12891", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-2t44s\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, 
LastTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-2394" not found' (will not retry!) +#+end_example + +** Volume: kube-api-access-2t44s +- volume logs: + + #+BEGIN_SRC shell :results verbatim :exports both + curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366153928210649088/artifacts/logs/kind-worker/kubelet.log | \ + grep "kube-api-access-2t44s" + #+end_src + + #+RESULTS: + #+begin_example + Feb 28 22:51:11 kind-worker kubelet[243]: I0228 22:51:11.755606 243 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") pod "test-deployment-7778d6bf57-fqqvk" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8") + Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.542102 243 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") pod "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8") + Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.560997 243 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s" (OuterVolumeSpecName: "kube-api-access-2t44s") pod "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8"). InnerVolumeSpecName "kube-api-access-2t44s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" + Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.642678 243 reconciler.go:319] Volume detached for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") on node "kind-worker" DevicePath "" + Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606772 243 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-2t44s ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} + Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606897 243 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2t44s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-fqqvk_deployment-2394(dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8): CreateContainerConfigError: cannot find volume "kube-api-access-2t44s" to mount into container "test-deployment" + Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606931 243 pod_workers.go:191] Error syncing pod dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8 
("test-deployment-7778d6bf57-fqqvk_deployment-2394(dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-2t44s\" to mount into container \"test-deployment\"" + Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.825451 243 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-fqqvk.16680b29cf00e812", GenerateName:"", Namespace:"deployment-2394", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-2394", Name:"test-deployment-7778d6bf57-fqqvk", UID:"dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8", APIVersion:"v1", ResourceVersion:"12891", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-2t44s\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-2394" not found' (will not retry!) 
+ #+end_example + +* Flake 2: Deployment +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1365724105587822592 +- namespace: deployment-8926 +- node: kind-worker2 +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1365724105587822592/artifacts/logs/kind-worker2/kubelet.log + +** Failure message (A) + +#+begin_example +¿½[1mSTEP�[0m: creating a Deployment +�[1mSTEP�[0m: waiting for Deployment to be created +�[1mSTEP�[0m: waiting for all Replicas to be Ready +Feb 27 18:30:19.521: INFO: observed Deployment test-deployment in namespace deployment-8926 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 18:30:19.521: INFO: observed Deployment test-deployment in namespace deployment-8926 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 18:30:19.526: INFO: observed Deployment test-deployment in namespace deployment-8926 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 18:30:19.527: INFO: observed Deployment test-deployment in namespace deployment-8926 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 18:30:19.545: INFO: observed Deployment test-deployment in namespace deployment-8926 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 18:30:19.545: INFO: observed Deployment test-deployment in namespace deployment-8926 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 18:30:19.572: INFO: observed Deployment test-deployment in namespace deployment-8926 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 18:30:19.572: INFO: observed Deployment test-deployment in namespace deployment-8926 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 18:31:19.520: FAIL: failed to see replicas of test-deployment in namespace deployment-8926 scale to requested amount of 2 +Unexpected error: + <*errors.errorString | 
0xc00023e240>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1365724105587822592/artifacts/logs/kind-worker2/kubelet.log | \ + grep "deployment-8926" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Feb 27 18:31:25 kind-worker2 kubelet[243]: E0227 18:31:25.672155 243 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zgtz4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-5kn2d_deployment-8926(6a27c0b2-b89e-41bc-a323-efcfd7dcca33): CreateContainerConfigError: cannot find volume "kube-api-access-zgtz4" to mount into container "test-deployment" +#+end_example + +** Volume: kube-api-access-zgtz4 + + #+BEGIN_SRC shell :results verbatim :exports both + curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1365724105587822592/artifacts/logs/kind-worker2/kubelet.log | \ + grep "kube-api-access-zgtz4" + #+end_src + + #+RESULTS: + #+begin_example + Feb 27 18:30:19 kind-worker2 kubelet[243]: I0227 18:30:19.690640 243 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for 
volume "kube-api-access-zgtz4" (UniqueName: "kubernetes.io/projected/6a27c0b2-b89e-41bc-a323-efcfd7dcca33-kube-api-access-zgtz4") pod "test-deployment-7778d6bf57-5kn2d" (UID: "6a27c0b2-b89e-41bc-a323-efcfd7dcca33") + Feb 27 18:31:25 kind-worker2 kubelet[243]: I0227 18:31:25.509650 243 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-zgtz4" (UniqueName: "kubernetes.io/projected/6a27c0b2-b89e-41bc-a323-efcfd7dcca33-kube-api-access-zgtz4") pod "6a27c0b2-b89e-41bc-a323-efcfd7dcca33" (UID: "6a27c0b2-b89e-41bc-a323-efcfd7dcca33") + Feb 27 18:31:25 kind-worker2 kubelet[243]: I0227 18:31:25.537865 243 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a27c0b2-b89e-41bc-a323-efcfd7dcca33-kube-api-access-zgtz4" (OuterVolumeSpecName: "kube-api-access-zgtz4") pod "6a27c0b2-b89e-41bc-a323-efcfd7dcca33" (UID: "6a27c0b2-b89e-41bc-a323-efcfd7dcca33"). InnerVolumeSpecName "kube-api-access-zgtz4". PluginName "kubernetes.io/projected", VolumeGidValue "" + Feb 27 18:31:25 kind-worker2 kubelet[243]: I0227 18:31:25.610224 243 reconciler.go:319] Volume detached for volume "kube-api-access-zgtz4" (UniqueName: "kubernetes.io/projected/6a27c0b2-b89e-41bc-a323-efcfd7dcca33-kube-api-access-zgtz4") on node "kind-worker2" DevicePath "" + Feb 27 18:31:25 kind-worker2 kubelet[243]: E0227 18:31:25.672018 243 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-zgtz4 ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} + Feb 27 18:31:25 kind-worker2 kubelet[243]: E0227 18:31:25.672155 243 kuberuntime_manager.go:841] container 
&Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zgtz4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-5kn2d_deployment-8926(6a27c0b2-b89e-41bc-a323-efcfd7dcca33): CreateContainerConfigError: cannot find volume "kube-api-access-zgtz4" to mount into container "test-deployment" + Feb 27 18:31:25 kind-worker2 kubelet[243]: E0227 18:31:25.672194 243 pod_workers.go:191] Error syncing pod 6a27c0b2-b89e-41bc-a323-efcfd7dcca33 ("test-deployment-7778d6bf57-5kn2d_deployment-8926(6a27c0b2-b89e-41bc-a323-efcfd7dcca33)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-zgtz4\" to mount into container \"test-deployment\"" + Feb 27 18:31:25 kind-worker2 kubelet[243]: E0227 18:31:25.783868 243 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-5kn2d.1667ae584adee0a0", GenerateName:"", Namespace:"deployment-8926", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), 
ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-8926", Name:"test-deployment-7778d6bf57-5kn2d", UID:"6a27c0b2-b89e-41bc-a323-efcfd7dcca33", APIVersion:"v1", ResourceVersion:"32658", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-zgtz4\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker2"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc006c1df680f3ea0, ext:798959332703, loc:(*time.Location)(0x3e95d80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc006c1df680f3ea0, ext:798959332703, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-8926" not found' (will not retry!) 
+ #+end_example + +* Flake 3: Deployment +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1365555248759836672 +- namespace: deployment-5921 +- node: kind-worker +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1365555248759836672/artifacts/logs/kind-worker/kubelet.log + +** Failure message (A) + +#+begin_example +�[1mSTEP�[0m: creating a Deployment +�[1mSTEP�[0m: waiting for Deployment to be created +�[1mSTEP�[0m: waiting for all Replicas to be Ready +Feb 27 07:08:24.219: INFO: observed Deployment test-deployment in namespace deployment-5921 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 07:08:24.219: INFO: observed Deployment test-deployment in namespace deployment-5921 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 07:08:24.244: INFO: observed Deployment test-deployment in namespace deployment-5921 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 07:08:24.245: INFO: observed Deployment test-deployment in namespace deployment-5921 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 07:08:24.304: INFO: observed Deployment test-deployment in namespace deployment-5921 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 07:08:24.305: INFO: observed Deployment test-deployment in namespace deployment-5921 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 07:08:24.386: INFO: observed Deployment test-deployment in namespace deployment-5921 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 07:08:24.386: INFO: observed Deployment test-deployment in namespace deployment-5921 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 07:09:24.218: FAIL: failed to see replicas of test-deployment in namespace deployment-5921 scale to requested amount of 2 +Unexpected error: + <*errors.errorString | 
0xc000238230>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1365555248759836672/artifacts/logs/kind-worker/kubelet.log | \ + grep "deployment-5921" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Feb 27 07:09:33 kind-worker kubelet[246]: E0227 07:09:33.073743 246 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s6mbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-fk8kl_deployment-5921(63e3745e-84f3-4d7a-911e-1e5a864e9d52): CreateContainerConfigError: cannot find volume "kube-api-access-s6mbj" to mount into container "test-deployment" +#+end_example + +** Volume: kube-api-access-s6mbj + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1365555248759836672/artifacts/logs/kind-worker/kubelet.log | \ + grep "kube-api-access-s6mbj" +#+end_src + +#+RESULTS: +#+begin_example +Feb 27 07:08:24 kind-worker kubelet[246]: I0227 07:08:24.303817 246 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume 
"kube-api-access-s6mbj" (UniqueName: "kubernetes.io/projected/63e3745e-84f3-4d7a-911e-1e5a864e9d52-kube-api-access-s6mbj") pod "test-deployment-7778d6bf57-fk8kl" (UID: "63e3745e-84f3-4d7a-911e-1e5a864e9d52") +Feb 27 07:09:32 kind-worker kubelet[246]: I0227 07:09:32.976978 246 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-s6mbj" (UniqueName: "kubernetes.io/projected/63e3745e-84f3-4d7a-911e-1e5a864e9d52-kube-api-access-s6mbj") pod "63e3745e-84f3-4d7a-911e-1e5a864e9d52" (UID: "63e3745e-84f3-4d7a-911e-1e5a864e9d52") +Feb 27 07:09:32 kind-worker kubelet[246]: I0227 07:09:32.980908 246 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63e3745e-84f3-4d7a-911e-1e5a864e9d52-kube-api-access-s6mbj" (OuterVolumeSpecName: "kube-api-access-s6mbj") pod "63e3745e-84f3-4d7a-911e-1e5a864e9d52" (UID: "63e3745e-84f3-4d7a-911e-1e5a864e9d52"). InnerVolumeSpecName "kube-api-access-s6mbj". PluginName "kubernetes.io/projected", VolumeGidValue "" +Feb 27 07:09:33 kind-worker kubelet[246]: E0227 07:09:33.073634 246 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-s6mbj ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Feb 27 07:09:33 kind-worker kubelet[246]: E0227 07:09:33.073743 246 kuberuntime_manager.go:841] container 
&Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s6mbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-fk8kl_deployment-5921(63e3745e-84f3-4d7a-911e-1e5a864e9d52): CreateContainerConfigError: cannot find volume "kube-api-access-s6mbj" to mount into container "test-deployment" +Feb 27 07:09:33 kind-worker kubelet[246]: E0227 07:09:33.073784 246 pod_workers.go:191] Error syncing pod 63e3745e-84f3-4d7a-911e-1e5a864e9d52 ("test-deployment-7778d6bf57-fk8kl_deployment-5921(63e3745e-84f3-4d7a-911e-1e5a864e9d52)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-s6mbj\" to mount into container \"test-deployment\"" +Feb 27 07:09:33 kind-worker kubelet[246]: I0227 07:09:33.078209 246 reconciler.go:319] Volume detached for volume "kube-api-access-s6mbj" (UniqueName: "kubernetes.io/projected/63e3745e-84f3-4d7a-911e-1e5a864e9d52-kube-api-access-s6mbj") on node "kind-worker" DevicePath "" +Feb 27 07:09:33 kind-worker kubelet[246]: E0227 07:09:33.193566 246 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-fk8kl.1667892295f9f5a2", GenerateName:"", Namespace:"deployment-5921", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, 
CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-5921", Name:"test-deployment-7778d6bf57-fk8kl", UID:"63e3745e-84f3-4d7a-911e-1e5a864e9d52", APIVersion:"v1", ResourceVersion:"4618", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-s6mbj\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc00699eb446473a2, ext:207273860949, loc:(*time.Location)(0x3e95d80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc00699eb446473a2, ext:207273860949, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-5921" not found' (will not retry!) 
+#+end_example + +* Flake 4: Deployment +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1367581462819246080 +- namespace: deployment-4634 +- node: kind-worker2 +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1367581462819246080/artifacts/logs/kind-worker2/kubelet.log + +** Failure message (A) + +#+begin_example +�[1mSTEP�[0m: creating a Deployment +�[1mSTEP�[0m: waiting for Deployment to be created +�[1mSTEP�[0m: waiting for all Replicas to be Ready +Mar 4 21:45:17.150: INFO: observed Deployment test-deployment in namespace deployment-4634 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 4 21:45:17.151: INFO: observed Deployment test-deployment in namespace deployment-4634 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 4 21:45:17.160: INFO: observed Deployment test-deployment in namespace deployment-4634 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 4 21:45:17.160: INFO: observed Deployment test-deployment in namespace deployment-4634 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 4 21:45:17.204: INFO: observed Deployment test-deployment in namespace deployment-4634 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 4 21:45:17.204: INFO: observed Deployment test-deployment in namespace deployment-4634 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 4 21:45:17.218: INFO: observed Deployment test-deployment in namespace deployment-4634 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 4 21:45:17.218: INFO: observed Deployment test-deployment in namespace deployment-4634 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 4 21:46:17.148: FAIL: failed to see replicas of test-deployment in namespace deployment-4634 scale to requested amount of 2 +Unexpected error: + <*errors.errorString | 
0xc00024a250>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1367581462819246080/artifacts/logs/kind-worker2/kubelet.log | \ + grep "deployment-4634" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Mar 04 21:46:26 kind-worker2 kubelet[246]: E0304 21:46:26.634183 246 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-djhbp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-ld5nd_deployment-4634(d794b1be-18fb-4a19-9bef-3422b2adcde9): CreateContainerConfigError: cannot find volume "kube-api-access-djhbp" to mount into container "test-deployment" +#+end_example + +** Volume: kube-api-access-djhbp + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1367581462819246080/artifacts/logs/kind-worker2/kubelet.log | \ + grep "kube-api-access-djhbp" +#+end_src + +#+RESULTS: +#+begin_example +Mar 04 21:45:17 kind-worker2 kubelet[246]: I0304 21:45:17.242195 246 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started 
for volume "kube-api-access-djhbp" (UniqueName: "kubernetes.io/projected/d794b1be-18fb-4a19-9bef-3422b2adcde9-kube-api-access-djhbp") pod "test-deployment-7778d6bf57-ld5nd" (UID: "d794b1be-18fb-4a19-9bef-3422b2adcde9") +Mar 04 21:46:26 kind-worker2 kubelet[246]: I0304 21:46:26.486182 246 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-djhbp" (UniqueName: "kubernetes.io/projected/d794b1be-18fb-4a19-9bef-3422b2adcde9-kube-api-access-djhbp") pod "d794b1be-18fb-4a19-9bef-3422b2adcde9" (UID: "d794b1be-18fb-4a19-9bef-3422b2adcde9") +Mar 04 21:46:26 kind-worker2 kubelet[246]: I0304 21:46:26.489025 246 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d794b1be-18fb-4a19-9bef-3422b2adcde9-kube-api-access-djhbp" (OuterVolumeSpecName: "kube-api-access-djhbp") pod "d794b1be-18fb-4a19-9bef-3422b2adcde9" (UID: "d794b1be-18fb-4a19-9bef-3422b2adcde9"). InnerVolumeSpecName "kube-api-access-djhbp". PluginName "kubernetes.io/projected", VolumeGidValue "" +Mar 04 21:46:26 kind-worker2 kubelet[246]: I0304 21:46:26.587377 246 reconciler.go:319] Volume detached for volume "kube-api-access-djhbp" (UniqueName: "kubernetes.io/projected/d794b1be-18fb-4a19-9bef-3422b2adcde9-kube-api-access-djhbp") on node "kind-worker2" DevicePath "" +Mar 04 21:46:26 kind-worker2 kubelet[246]: E0304 21:46:26.634067 246 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-djhbp ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Mar 04 21:46:26 kind-worker2 kubelet[246]: E0304 21:46:26.634183 246 kuberuntime_manager.go:841] container 
&Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-djhbp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-ld5nd_deployment-4634(d794b1be-18fb-4a19-9bef-3422b2adcde9): CreateContainerConfigError: cannot find volume "kube-api-access-djhbp" to mount into container "test-deployment" +Mar 04 21:46:26 kind-worker2 kubelet[246]: E0304 21:46:26.634228 246 pod_workers.go:191] Error syncing pod d794b1be-18fb-4a19-9bef-3422b2adcde9 ("test-deployment-7778d6bf57-ld5nd_deployment-4634(d794b1be-18fb-4a19-9bef-3422b2adcde9)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-djhbp\" to mount into container \"test-deployment\"" +Mar 04 21:46:26 kind-worker2 kubelet[246]: E0304 21:46:26.801055 246 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-ld5nd.166941e379195743", GenerateName:"", Namespace:"deployment-4634", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), 
ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-4634", Name:"test-deployment-7778d6bf57-ld5nd", UID:"d794b1be-18fb-4a19-9bef-3422b2adcde9", APIVersion:"v1", ResourceVersion:"46695", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-djhbp\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker2"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc008732ca5cbe343, ext:1520541188005, loc:(*time.Location)(0x70f9ea0)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc008732ca5cbe343, ext:1520541188005, loc:(*time.Location)(0x70f9ea0)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-4634" not found' (will not retry!) 
+#+end_example + +* Flake 5: Deployment +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1366430000500183040 +- namespace: deployment-674 +- node: kind-worker2 +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1366430000500183040/artifacts/logs/kind-worker2/kubelet.log + +** Failure message (A) + +#+begin_example +�[1mSTEP�[0m: creating a Deployment +�[1mSTEP�[0m: waiting for Deployment to be created +�[1mSTEP�[0m: waiting for all Replicas to be Ready +Mar 1 17:21:21.044: INFO: observed Deployment test-deployment in namespace deployment-674 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 17:21:21.044: INFO: observed Deployment test-deployment in namespace deployment-674 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 17:21:21.047: INFO: observed Deployment test-deployment in namespace deployment-674 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 17:21:21.048: INFO: observed Deployment test-deployment in namespace deployment-674 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 17:21:21.058: INFO: observed Deployment test-deployment in namespace deployment-674 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 17:21:21.058: INFO: observed Deployment test-deployment in namespace deployment-674 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 17:21:21.085: INFO: observed Deployment test-deployment in namespace deployment-674 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 17:21:21.085: INFO: observed Deployment test-deployment in namespace deployment-674 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 17:22:21.043: FAIL: failed to see replicas of test-deployment in namespace deployment-674 scale to requested amount of 2 +Unexpected error: + <*errors.errorString | 0xc00023a230>: { + s: 
"timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1366430000500183040/artifacts/logs/kind-worker2/kubelet.log | \ + grep "deployment-674" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Mar 01 17:22:30 kind-worker2 kubelet[242]: E0301 17:22:30.358633 242 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qwrgt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-c4bqp_deployment-674(c4b074bc-69ec-4e07-a068-3789636d74f7): CreateContainerConfigError: cannot find volume "kube-api-access-qwrgt" to mount into container "test-deployment" +#+end_example + +** Volume: kube-api-access-qwrgt + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1366430000500183040/artifacts/logs/kind-worker2/kubelet.log | \ + grep "kube-api-access-qwrgt" +#+end_src + +#+RESULTS: +#+begin_example +Mar 01 17:21:21 kind-worker2 kubelet[242]: I0301 17:21:21.133467 242 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume 
"kube-api-access-qwrgt" (UniqueName: "kubernetes.io/projected/c4b074bc-69ec-4e07-a068-3789636d74f7-kube-api-access-qwrgt") pod "test-deployment-7778d6bf57-c4bqp" (UID: "c4b074bc-69ec-4e07-a068-3789636d74f7") +Mar 01 17:22:30 kind-worker2 kubelet[242]: I0301 17:22:30.193522 242 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-qwrgt" (UniqueName: "kubernetes.io/projected/c4b074bc-69ec-4e07-a068-3789636d74f7-kube-api-access-qwrgt") pod "c4b074bc-69ec-4e07-a068-3789636d74f7" (UID: "c4b074bc-69ec-4e07-a068-3789636d74f7") +Mar 01 17:22:30 kind-worker2 kubelet[242]: I0301 17:22:30.196603 242 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4b074bc-69ec-4e07-a068-3789636d74f7-kube-api-access-qwrgt" (OuterVolumeSpecName: "kube-api-access-qwrgt") pod "c4b074bc-69ec-4e07-a068-3789636d74f7" (UID: "c4b074bc-69ec-4e07-a068-3789636d74f7"). InnerVolumeSpecName "kube-api-access-qwrgt". PluginName "kubernetes.io/projected", VolumeGidValue "" +Mar 01 17:22:30 kind-worker2 kubelet[242]: I0301 17:22:30.294136 242 reconciler.go:319] Volume detached for volume "kube-api-access-qwrgt" (UniqueName: "kubernetes.io/projected/c4b074bc-69ec-4e07-a068-3789636d74f7-kube-api-access-qwrgt") on node "kind-worker2" DevicePath "" +Mar 01 17:22:30 kind-worker2 kubelet[242]: E0301 17:22:30.358508 242 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-qwrgt ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Mar 01 17:22:30 kind-worker2 kubelet[242]: E0301 17:22:30.358633 242 kuberuntime_manager.go:841] container 
&Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qwrgt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-c4bqp_deployment-674(c4b074bc-69ec-4e07-a068-3789636d74f7): CreateContainerConfigError: cannot find volume "kube-api-access-qwrgt" to mount into container "test-deployment" +Mar 01 17:22:30 kind-worker2 kubelet[242]: E0301 17:22:30.358670 242 pod_workers.go:191] Error syncing pod c4b074bc-69ec-4e07-a068-3789636d74f7 ("test-deployment-7778d6bf57-c4bqp_deployment-674(c4b074bc-69ec-4e07-a068-3789636d74f7)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-qwrgt\" to mount into container \"test-deployment\"" +Mar 01 17:22:30 kind-worker2 kubelet[242]: E0301 17:22:30.467979 242 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-c4bqp.166847be99981afa", GenerateName:"", Namespace:"deployment-674", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", 
ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-674", Name:"test-deployment-7778d6bf57-c4bqp", UID:"c4b074bc-69ec-4e07-a068-3789636d74f7", APIVersion:"v1", ResourceVersion:"44876", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-qwrgt\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker2"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0076695955f3efa, ext:1017486367673, loc:(*time.Location)(0x3e95d80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc0076695955f3efa, ext:1017486367673, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-674" not found' (will not retry!) 
+#+end_example + +* Flake 6: Deployment +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1366414647938256896 +- namespace: deployment-9744 +- node: kind-worker +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1366414647938256896/artifacts/logs/kind-worker/kubelet.log + +** Failure message (A) + +#+begin_example +�[1mSTEP�[0m: creating a Deployment +�[1mSTEP�[0m: waiting for Deployment to be created +�[1mSTEP�[0m: waiting for all Replicas to be Ready +Mar 1 16:17:31.831: INFO: observed Deployment test-deployment in namespace deployment-9744 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 16:17:31.831: INFO: observed Deployment test-deployment in namespace deployment-9744 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 16:17:31.846: INFO: observed Deployment test-deployment in namespace deployment-9744 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 16:17:31.846: INFO: observed Deployment test-deployment in namespace deployment-9744 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 16:17:31.865: INFO: observed Deployment test-deployment in namespace deployment-9744 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 16:17:31.865: INFO: observed Deployment test-deployment in namespace deployment-9744 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 16:17:31.903: INFO: observed Deployment test-deployment in namespace deployment-9744 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 16:17:31.903: INFO: observed Deployment test-deployment in namespace deployment-9744 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Mar 1 16:18:31.829: FAIL: failed to see replicas of test-deployment in namespace deployment-9744 scale to requested amount of 2 +Unexpected error: + <*errors.errorString | 
0xc00023e240>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1366414647938256896/artifacts/logs/kind-worker/kubelet.log | \ + grep "deployment-9744" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Mar 01 16:18:39 kind-worker kubelet[245]: E0301 16:18:39.571316 245 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fptj9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-dnc7w_deployment-9744(9004dce7-dab8-4c38-b58d-3cfb2244143c): CreateContainerConfigError: cannot find volume "kube-api-access-fptj9" to mount into container "test-deployment" +#+end_example + +** Volume: kube-api-access-fptj9 + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1366414647938256896/artifacts/logs/kind-worker/kubelet.log | \ + grep "kube-api-access-fptj9" +#+end_src + +#+RESULTS: +#+begin_example +Mar 01 16:17:32 kind-worker kubelet[245]: I0301 16:17:32.001596 245 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for 
volume "kube-api-access-fptj9" (UniqueName: "kubernetes.io/projected/9004dce7-dab8-4c38-b58d-3cfb2244143c-kube-api-access-fptj9") pod "test-deployment-7778d6bf57-dnc7w" (UID: "9004dce7-dab8-4c38-b58d-3cfb2244143c") +Mar 01 16:18:38 kind-worker kubelet[245]: I0301 16:18:38.855548 245 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-fptj9" (UniqueName: "kubernetes.io/projected/9004dce7-dab8-4c38-b58d-3cfb2244143c-kube-api-access-fptj9") pod "9004dce7-dab8-4c38-b58d-3cfb2244143c" (UID: "9004dce7-dab8-4c38-b58d-3cfb2244143c") +Mar 01 16:18:38 kind-worker kubelet[245]: I0301 16:18:38.858977 245 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9004dce7-dab8-4c38-b58d-3cfb2244143c-kube-api-access-fptj9" (OuterVolumeSpecName: "kube-api-access-fptj9") pod "9004dce7-dab8-4c38-b58d-3cfb2244143c" (UID: "9004dce7-dab8-4c38-b58d-3cfb2244143c"). InnerVolumeSpecName "kube-api-access-fptj9". PluginName "kubernetes.io/projected", VolumeGidValue "" +Mar 01 16:18:38 kind-worker kubelet[245]: I0301 16:18:38.956244 245 reconciler.go:319] Volume detached for volume "kube-api-access-fptj9" (UniqueName: "kubernetes.io/projected/9004dce7-dab8-4c38-b58d-3cfb2244143c-kube-api-access-fptj9") on node "kind-worker" DevicePath "" +Mar 01 16:18:39 kind-worker kubelet[245]: E0301 16:18:39.571177 245 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-fptj9 ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Mar 01 16:18:39 kind-worker kubelet[245]: E0301 16:18:39.571316 245 kuberuntime_manager.go:841] container 
&Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fptj9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-dnc7w_deployment-9744(9004dce7-dab8-4c38-b58d-3cfb2244143c): CreateContainerConfigError: cannot find volume "kube-api-access-fptj9" to mount into container "test-deployment" +Mar 01 16:18:39 kind-worker kubelet[245]: E0301 16:18:39.571377 245 pod_workers.go:191] Error syncing pod 9004dce7-dab8-4c38-b58d-3cfb2244143c ("test-deployment-7778d6bf57-dnc7w_deployment-9744(9004dce7-dab8-4c38-b58d-3cfb2244143c)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-fptj9\" to mount into container \"test-deployment\"" +Mar 01 16:18:39 kind-worker kubelet[245]: E0301 16:18:39.684822 245 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-dnc7w.16684442ace0883b", GenerateName:"", Namespace:"deployment-9744", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", 
ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-9744", Name:"test-deployment-7778d6bf57-dnc7w", UID:"9004dce7-dab8-4c38-b58d-3cfb2244143c", APIVersion:"v1", ResourceVersion:"46169", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-fptj9\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc00762d7e20c923b, ext:1047003570284, loc:(*time.Location)(0x3e95d80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc00762d7e20c923b, ext:1047003570284, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-9744" not found' (will not retry!) 
+#+end_example + +* Flake 7: Deployment +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1365800608363188224 +- namespace: deployment-820 +- node: kind-worker +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1365800608363188224/artifacts/logs/kind-worker/kubelet.log + +** Failure message (B) + +#+begin_example +�[1mSTEP�[0m: creating a Deployment +�[1mSTEP�[0m: waiting for Deployment to be created +�[1mSTEP�[0m: waiting for all Replicas to be Ready +Feb 27 23:25:45.899: INFO: observed Deployment test-deployment in namespace deployment-820 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 23:25:45.899: INFO: observed Deployment test-deployment in namespace deployment-820 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 23:25:45.907: INFO: observed Deployment test-deployment in namespace deployment-820 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 23:25:45.907: INFO: observed Deployment test-deployment in namespace deployment-820 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 23:25:45.989: INFO: observed Deployment test-deployment in namespace deployment-820 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 23:25:45.989: INFO: observed Deployment test-deployment in namespace deployment-820 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 23:25:46.022: INFO: observed Deployment test-deployment in namespace deployment-820 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 23:25:46.023: INFO: observed Deployment test-deployment in namespace deployment-820 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Feb 27 23:26:45.897: FAIL: failed to see replicas of test-deployment in namespace deployment-820 scale to requested amount of 2 +Unexpected error: + <*errors.errorString | 0xc00023e250>: 
{ + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1365800608363188224/artifacts/logs/kind-worker/kubelet.log | \ + grep "deployment-820" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Feb 27 23:26:58 kind-worker kubelet[247]: E0227 23:26:58.833342 247 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-59b64,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-4cftv_deployment-820(8041ee3d-a251-4902-b45f-29f640ec168e): CreateContainerConfigError: cannot find volume "kube-api-access-59b64" to mount into container "test-deployment" +#+end_example + +** Volume: kube-api-access-59b64 + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1365800608363188224/artifacts/logs/kind-worker/kubelet.log | \ + grep "kube-api-access-59b64" +#+end_src + +* Flake 8: Deployment +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1364802893898584064 +- namespace: deployment-3233 +- node: 
kind-worker2 +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1364802893898584064/artifacts/logs/kind-worker2/kubelet.log + +** Failure message (B) + +#+begin_example +�[1mSTEP�[0m: fetching the DeploymentStatus +Feb 25 05:34:10.780: INFO: observed Deployment test-deployment in namespace deployment-3233 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 25 05:34:10.814: INFO: observed Deployment test-deployment in namespace deployment-3233 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 25 05:34:10.853: INFO: observed Deployment test-deployment in namespace deployment-3233 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 25 05:34:10.908: INFO: observed Deployment test-deployment in namespace deployment-3233 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 25 05:34:10.921: INFO: observed Deployment test-deployment in namespace deployment-3233 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 25 05:34:10.935: INFO: observed Deployment test-deployment in namespace deployment-3233 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 25 05:34:10.947: INFO: observed Deployment test-deployment in namespace deployment-3233 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Feb 25 05:35:10.775: FAIL: failed to see replicas of test-deployment in namespace deployment-3233 scale to requested amount of 2 +Unexpected error: + <*errors.errorString | 0xc0002ae230>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl 
https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1364802893898584064/artifacts/logs/kind-worker2/kubelet.log | \ + grep "deployment-3233" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Feb 25 05:35:19 kind-worker2 kubelet[246]: E0225 05:35:19.439596 246 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xns8t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-b68477ffb-92cmj_deployment-3233(f43e23ad-537a-49d3-9f5b-8295caa27218): CreateContainerConfigError: cannot find volume "kube-api-access-xns8t" to mount into container "test-deployment" +#+end_example + +** Volume: kube-api-access-xns8t + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1364802893898584064/artifacts/logs/kind-worker2/kubelet.log | \ + grep "kube-api-access-xns8t" +#+end_src + +#+RESULTS: +#+begin_example +Feb 25 05:34:11 kind-worker2 kubelet[246]: I0225 05:34:11.064059 246 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-xns8t" (UniqueName: "kubernetes.io/projected/f43e23ad-537a-49d3-9f5b-8295caa27218-kube-api-access-xns8t") pod "test-deployment-b68477ffb-92cmj" (UID: "f43e23ad-537a-49d3-9f5b-8295caa27218") 
+Feb 25 05:35:18 kind-worker2 kubelet[246]: I0225 05:35:18.368328 246 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-xns8t" (UniqueName: "kubernetes.io/projected/f43e23ad-537a-49d3-9f5b-8295caa27218-kube-api-access-xns8t") pod "f43e23ad-537a-49d3-9f5b-8295caa27218" (UID: "f43e23ad-537a-49d3-9f5b-8295caa27218") +Feb 25 05:35:18 kind-worker2 kubelet[246]: I0225 05:35:18.409504 246 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f43e23ad-537a-49d3-9f5b-8295caa27218-kube-api-access-xns8t" (OuterVolumeSpecName: "kube-api-access-xns8t") pod "f43e23ad-537a-49d3-9f5b-8295caa27218" (UID: "f43e23ad-537a-49d3-9f5b-8295caa27218"). InnerVolumeSpecName "kube-api-access-xns8t". PluginName "kubernetes.io/projected", VolumeGidValue "" +Feb 25 05:35:18 kind-worker2 kubelet[246]: I0225 05:35:18.469018 246 reconciler.go:319] Volume detached for volume "kube-api-access-xns8t" (UniqueName: "kubernetes.io/projected/f43e23ad-537a-49d3-9f5b-8295caa27218-kube-api-access-xns8t") on node "kind-worker2" DevicePath "" +Feb 25 05:35:19 kind-worker2 kubelet[246]: E0225 05:35:19.439308 246 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-xns8t ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Feb 25 05:35:19 kind-worker2 kubelet[246]: E0225 05:35:19.439596 246 kuberuntime_manager.go:841] container 
&Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xns8t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-b68477ffb-92cmj_deployment-3233(f43e23ad-537a-49d3-9f5b-8295caa27218): CreateContainerConfigError: cannot find volume "kube-api-access-xns8t" to mount into container "test-deployment" +Feb 25 05:35:19 kind-worker2 kubelet[246]: E0225 05:35:19.439673 246 pod_workers.go:191] Error syncing pod f43e23ad-537a-49d3-9f5b-8295caa27218 ("test-deployment-b68477ffb-92cmj_deployment-3233(f43e23ad-537a-49d3-9f5b-8295caa27218)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-xns8t\" to mount into container \"test-deployment\"" +Feb 25 05:35:19 kind-worker2 kubelet[246]: E0225 05:35:19.553114 246 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-b68477ffb-92cmj.1666e6d51c7f23e7", GenerateName:"", Namespace:"deployment-3233", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), 
ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-3233", Name:"test-deployment-b68477ffb-92cmj", UID:"f43e23ad-537a-49d3-9f5b-8295caa27218", APIVersion:"v1", ResourceVersion:"44238", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-xns8t\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker2"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc005eba5da30fde7, ext:1027229452076, loc:(*time.Location)(0x3e93d40)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc005eba5da30fde7, ext:1027229452076, loc:(*time.Location)(0x3e93d40)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-3233" not found' (will not retry!) 
+#+end_example + + + +* Flake 9: ReplicationController +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1368825065877016576 +- namespace: replication-controller-9476 +- node: kind-worker2 +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1368825065877016576/artifacts/logs/kind-worker2/kubelet.log + +** Failure message + +#+begin_example +�[1mSTEP�[0m: creating a ReplicationController +�[1mSTEP�[0m: waiting for RC to be added +�[1mSTEP�[0m: waiting for available Replicas +Mar 8 08:00:01.557: FAIL: Wait for condition with watch events should not return an error +Unexpected error: + <*errors.errorString | 0xc00024a250>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1368825065877016576/artifacts/logs/kind-worker2/kubelet.log | \ + grep "replication-controller-9476" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Mar 08 08:00:10 kind-worker2 kubelet[244]: E0308 08:00:10.038816 244 kuberuntime_manager.go:844] container &Container{Name:rc-test,Image:k8s.gcr.io/e2e-test-images/nginx:1.14-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xs4mx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start 
failed in pod rc-test-x7tb4_replication-controller-9476(27aebf12-640a-4101-b089-ddfbc0f0312a): CreateContainerConfigError: cannot find volume "kube-api-access-xs4mx" to mount into container "rc-test" +#+end_example + +** Volume: kube-api-access-xs4mx + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1368825065877016576/artifacts/logs/kind-worker2/kubelet.log | \ + grep "kube-api-access-xs4mx" +#+end_src + +#+RESULTS: +#+begin_example +Mar 08 07:58:01 kind-worker2 kubelet[244]: I0308 07:58:01.735781 244 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-xs4mx" (UniqueName: "kubernetes.io/projected/27aebf12-640a-4101-b089-ddfbc0f0312a-kube-api-access-xs4mx") pod "rc-test-x7tb4" (UID: "27aebf12-640a-4101-b089-ddfbc0f0312a") +Mar 08 08:00:08 kind-worker2 kubelet[244]: I0308 08:00:08.868869 244 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-xs4mx" (UniqueName: "kubernetes.io/projected/27aebf12-640a-4101-b089-ddfbc0f0312a-kube-api-access-xs4mx") pod "27aebf12-640a-4101-b089-ddfbc0f0312a" (UID: "27aebf12-640a-4101-b089-ddfbc0f0312a") +Mar 08 08:00:08 kind-worker2 kubelet[244]: I0308 08:00:08.871870 244 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27aebf12-640a-4101-b089-ddfbc0f0312a-kube-api-access-xs4mx" (OuterVolumeSpecName: "kube-api-access-xs4mx") pod "27aebf12-640a-4101-b089-ddfbc0f0312a" (UID: "27aebf12-640a-4101-b089-ddfbc0f0312a"). InnerVolumeSpecName "kube-api-access-xs4mx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" +Mar 08 08:00:08 kind-worker2 kubelet[244]: I0308 08:00:08.970253 244 reconciler.go:319] Volume detached for volume "kube-api-access-xs4mx" (UniqueName: "kubernetes.io/projected/27aebf12-640a-4101-b089-ddfbc0f0312a-kube-api-access-xs4mx") on node "kind-worker2" DevicePath "" +Mar 08 08:00:10 kind-worker2 kubelet[244]: E0308 08:00:10.038729 244 kubelet_pods.go:161] Mount cannot be satisfied for container "rc-test", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-xs4mx ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Mar 08 08:00:10 kind-worker2 kubelet[244]: E0308 08:00:10.038816 244 kuberuntime_manager.go:844] container &Container{Name:rc-test,Image:k8s.gcr.io/e2e-test-images/nginx:1.14-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xs4mx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod rc-test-x7tb4_replication-controller-9476(27aebf12-640a-4101-b089-ddfbc0f0312a): CreateContainerConfigError: cannot find volume "kube-api-access-xs4mx" to mount into container "rc-test" +Mar 08 08:00:10 kind-worker2 kubelet[244]: E0308 08:00:10.038844 244 pod_workers.go:191] Error syncing pod 27aebf12-640a-4101-b089-ddfbc0f0312a 
("rc-test-x7tb4_replication-controller-9476(27aebf12-640a-4101-b089-ddfbc0f0312a)"), skipping: failed to "StartContainer" for "rc-test" with CreateContainerConfigError: "cannot find volume \"kube-api-access-xs4mx\" to mount into container \"rc-test\"" +Mar 08 08:00:10 kind-worker2 kubelet[244]: E0308 08:00:10.043148 244 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"rc-test-x7tb4.166a4f1ecafa82e1", GenerateName:"", Namespace:"replication-controller-9476", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"replication-controller-9476", Name:"rc-test-x7tb4", UID:"27aebf12-640a-4101-b089-ddfbc0f0312a", APIVersion:"v1", ResourceVersion:"45062", FieldPath:"spec.containers{rc-test}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-xs4mx\" to mount into container \"rc-test\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker2"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0099442824f9ee1, ext:1037169575088, loc:(*time.Location)(0x7418700)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc0099442824f9ee1, ext:1037169575088, loc:(*time.Location)(0x7418700)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'events "rc-test-x7tb4.166a4f1ecafa82e1" is forbidden: unable to create new content in namespace replication-controller-9476 because it is being terminated' (will 
not retry!) +#+end_example + +* Flake 10: ReplicationController +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1367934725221519360 +- namespace: replication-controller-5147 +- node: kind-worker2 +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1367934725221519360/artifacts/logs/kind-worker2/kubelet.log + +** Failure message + +#+begin_example +�[1mSTEP�[0m: creating a ReplicationController +�[1mSTEP�[0m: waiting for RC to be added +�[1mSTEP�[0m: waiting for available Replicas +Mar 5 20:56:36.365: FAIL: Wait for condition with watch events should not return an error +Unexpected error: + <*errors.errorString | 0xc00024a250>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1367934725221519360/artifacts/logs/kind-worker2/kubelet.log | \ + grep "replication-controller-5147" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Mar 05 20:56:44 kind-worker2 kubelet[247]: E0305 20:56:44.417959 247 kuberuntime_manager.go:841] container 
&Container{Name:rc-test,Image:k8s.gcr.io/e2e-test-images/nginx:1.14-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z4fpx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod rc-test-46wcn_replication-controller-5147(cb10f7a9-c966-418a-82ff-44fab5aa124b): CreateContainerConfigError: cannot find volume "kube-api-access-z4fpx" to mount into container "rc-test" +#+end_example + +** Volume: kube-api-access-z4fpx + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1367934725221519360/artifacts/logs/kind-worker2/kubelet.log | \ + grep "kube-api-access-z4fpx" +#+end_src + +#+RESULTS: +#+begin_example +Mar 05 20:54:36 kind-worker2 kubelet[247]: I0305 20:54:36.489866 247 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-z4fpx" (UniqueName: "kubernetes.io/projected/cb10f7a9-c966-418a-82ff-44fab5aa124b-kube-api-access-z4fpx") pod "rc-test-46wcn" (UID: "cb10f7a9-c966-418a-82ff-44fab5aa124b") +Mar 05 20:56:43 kind-worker2 kubelet[247]: I0305 20:56:43.828236 247 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-z4fpx" (UniqueName: "kubernetes.io/projected/cb10f7a9-c966-418a-82ff-44fab5aa124b-kube-api-access-z4fpx") pod "cb10f7a9-c966-418a-82ff-44fab5aa124b" (UID: "cb10f7a9-c966-418a-82ff-44fab5aa124b") +Mar 05 20:56:43 kind-worker2 kubelet[247]: I0305 20:56:43.831099 247 
operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb10f7a9-c966-418a-82ff-44fab5aa124b-kube-api-access-z4fpx" (OuterVolumeSpecName: "kube-api-access-z4fpx") pod "cb10f7a9-c966-418a-82ff-44fab5aa124b" (UID: "cb10f7a9-c966-418a-82ff-44fab5aa124b"). InnerVolumeSpecName "kube-api-access-z4fpx". PluginName "kubernetes.io/projected", VolumeGidValue "" +Mar 05 20:56:43 kind-worker2 kubelet[247]: I0305 20:56:43.929543 247 reconciler.go:319] Volume detached for volume "kube-api-access-z4fpx" (UniqueName: "kubernetes.io/projected/cb10f7a9-c966-418a-82ff-44fab5aa124b-kube-api-access-z4fpx") on node "kind-worker2" DevicePath "" +Mar 05 20:56:44 kind-worker2 kubelet[247]: E0305 20:56:44.417842 247 kubelet_pods.go:161] Mount cannot be satisfied for container "rc-test", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-z4fpx ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Mar 05 20:56:44 kind-worker2 kubelet[247]: E0305 20:56:44.417959 247 kuberuntime_manager.go:841] container &Container{Name:rc-test,Image:k8s.gcr.io/e2e-test-images/nginx:1.14-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z4fpx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod 
rc-test-46wcn_replication-controller-5147(cb10f7a9-c966-418a-82ff-44fab5aa124b): CreateContainerConfigError: cannot find volume "kube-api-access-z4fpx" to mount into container "rc-test" +Mar 05 20:56:44 kind-worker2 kubelet[247]: E0305 20:56:44.418001 247 pod_workers.go:191] Error syncing pod cb10f7a9-c966-418a-82ff-44fab5aa124b ("rc-test-46wcn_replication-controller-5147(cb10f7a9-c966-418a-82ff-44fab5aa124b)"), skipping: failed to "StartContainer" for "rc-test" with CreateContainerConfigError: "cannot find volume \"kube-api-access-z4fpx\" to mount into container \"rc-test\"" +Mar 05 20:56:44 kind-worker2 kubelet[247]: E0305 20:56:44.423748 247 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"rc-test-46wcn.16698dc1b0782cd5", GenerateName:"", Namespace:"replication-controller-5147", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"replication-controller-5147", Name:"rc-test-46wcn", UID:"cb10f7a9-c966-418a-82ff-44fab5aa124b", APIVersion:"v1", ResourceVersion:"33027", FieldPath:"spec.containers{rc-test}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-z4fpx\" to mount into container \"rc-test\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker2"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc008c4a318e8b4d5, ext:820421232581, loc:(*time.Location)(0x711c200)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc008c4a318e8b4d5, ext:820421232581, loc:(*time.Location)(0x711c200)}}, Count:1, Type:"Warning", 
EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'events "rc-test-46wcn.16698dc1b0782cd5" is forbidden: unable to create new content in namespace replication-controller-5147 because it is being terminated' (will not retry!) +#+end_example + +* Flake 11: ReplicationController +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366829476511485952 +- namespace: replication-controller-4026 +- node: kind-worker2 +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366829476511485952/artifacts/logs/kind-worker2/kubelet.log + +** Failure message + +#+begin_example +�[1mSTEP�[0m: creating a ReplicationController +�[1mSTEP�[0m: waiting for RC to be added +�[1mSTEP�[0m: waiting for available Replicas +Mar 2 19:38:49.544: FAIL: Wait for condition with watch events should not return an error +Unexpected error: + <*errors.errorString | 0xc0002b0230>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366829476511485952/artifacts/logs/kind-worker2/kubelet.log | \ + grep "replication-controller-4026" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Mar 02 19:38:57 kind-worker2 kubelet[245]: E0302 19:38:57.671836 245 kuberuntime_manager.go:841] container 
&Container{Name:rc-test,Image:k8s.gcr.io/e2e-test-images/nginx:1.14-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9b7vw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod rc-test-t24fw_replication-controller-4026(6279ef28-59af-4082-a39a-3fc8e5bbb8e4): CreateContainerConfigError: cannot find volume "kube-api-access-9b7vw" to mount into container "rc-test" +#+end_example + +** Volume: kube-api-access-9b7vw + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366829476511485952/artifacts/logs/kind-worker2/kubelet.log | \ + grep "kube-api-access-9b7vw" +#+end_src + +#+RESULTS: +#+begin_example +Mar 02 19:36:49 kind-worker2 kubelet[245]: I0302 19:36:49.684363 245 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-9b7vw" (UniqueName: "kubernetes.io/projected/6279ef28-59af-4082-a39a-3fc8e5bbb8e4-kube-api-access-9b7vw") pod "rc-test-t24fw" (UID: "6279ef28-59af-4082-a39a-3fc8e5bbb8e4") +Mar 02 19:38:57 kind-worker2 kubelet[245]: I0302 19:38:57.164498 245 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-9b7vw" (UniqueName: "kubernetes.io/projected/6279ef28-59af-4082-a39a-3fc8e5bbb8e4-kube-api-access-9b7vw") pod "6279ef28-59af-4082-a39a-3fc8e5bbb8e4" (UID: "6279ef28-59af-4082-a39a-3fc8e5bbb8e4") +Mar 02 19:38:57 kind-worker2 kubelet[245]: I0302 19:38:57.168455 
245 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6279ef28-59af-4082-a39a-3fc8e5bbb8e4-kube-api-access-9b7vw" (OuterVolumeSpecName: "kube-api-access-9b7vw") pod "6279ef28-59af-4082-a39a-3fc8e5bbb8e4" (UID: "6279ef28-59af-4082-a39a-3fc8e5bbb8e4"). InnerVolumeSpecName "kube-api-access-9b7vw". PluginName "kubernetes.io/projected", VolumeGidValue "" +Mar 02 19:38:57 kind-worker2 kubelet[245]: I0302 19:38:57.266102 245 reconciler.go:319] Volume detached for volume "kube-api-access-9b7vw" (UniqueName: "kubernetes.io/projected/6279ef28-59af-4082-a39a-3fc8e5bbb8e4-kube-api-access-9b7vw") on node "kind-worker2" DevicePath "" +Mar 02 19:38:57 kind-worker2 kubelet[245]: E0302 19:38:57.671707 245 kubelet_pods.go:159] Mount cannot be satisfied for container "rc-test", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-9b7vw ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Mar 02 19:38:57 kind-worker2 kubelet[245]: E0302 19:38:57.671836 245 kuberuntime_manager.go:841] container &Container{Name:rc-test,Image:k8s.gcr.io/e2e-test-images/nginx:1.14-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9b7vw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod 
rc-test-t24fw_replication-controller-4026(6279ef28-59af-4082-a39a-3fc8e5bbb8e4): CreateContainerConfigError: cannot find volume "kube-api-access-9b7vw" to mount into container "rc-test" +Mar 02 19:38:57 kind-worker2 kubelet[245]: E0302 19:38:57.671877 245 pod_workers.go:191] Error syncing pod 6279ef28-59af-4082-a39a-3fc8e5bbb8e4 ("rc-test-t24fw_replication-controller-4026(6279ef28-59af-4082-a39a-3fc8e5bbb8e4)"), skipping: failed to "StartContainer" for "rc-test" with CreateContainerConfigError: "cannot find volume \"kube-api-access-9b7vw\" to mount into container \"rc-test\"" +Mar 02 19:38:57 kind-worker2 kubelet[245]: E0302 19:38:57.676549 245 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"rc-test-t24fw.16689dc56ccc747e", GenerateName:"", Namespace:"replication-controller-4026", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"replication-controller-4026", Name:"rc-test-t24fw", UID:"6279ef28-59af-4082-a39a-3fc8e5bbb8e4", APIVersion:"v1", ResourceVersion:"3528", FieldPath:"spec.containers{rc-test}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-9b7vw\" to mount into container \"rc-test\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker2"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc007c2f4680a8a7e, ext:246952707866, loc:(*time.Location)(0x3e310c0)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc007c2f4680a8a7e, ext:246952707866, loc:(*time.Location)(0x3e310c0)}}, Count:1, Type:"Warning", 
EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'events "rc-test-t24fw.16689dc56ccc747e" is forbidden: unable to create new content in namespace replication-controller-4026 because it is being terminated' (will not retry!) +#+end_example + + +* Flake 12: Pod & PodStatus +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1368241744696578048 +- Namespace: pods-9355 +- Node: kind-worker +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1368241744696578048/artifacts/logs/kind-worker/kubelet.log + +** Failure message + +#+begin_example +#�[1mSTEP�[0m: creating a Pod with a static label +�[1mSTEP�[0m: watching for Pod to be ready +Mar 6 17:12:06.810: INFO: observed Pod pod-test in namespace pods-9355 in phase Pending with labels: map[test-pod-static:true] & conditions [] +Mar 6 17:12:06.812: INFO: observed Pod pod-test in namespace pods-9355 in phase Pending with labels: map[test-pod-static:true] & conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-03-06 17:12:06 +0000 UTC }] +Mar 6 17:12:06.836: INFO: observed Pod pod-test in namespace pods-9355 in phase Pending with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-03-06 17:12:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-03-06 17:12:06 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-03-06 17:12:06 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-03-06 17:12:06 +0000 UTC }] +Mar 6 17:14:06.809: FAIL: failed to see Pod pod-test in namespace pods-9355 running +Unexpected error: + <*errors.errorString | 0xc0001ca250>: { + s: "timed out 
waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1368241744696578048/artifacts/logs/kind-worker/kubelet.log | \ + grep "pods-9355" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Mar 06 17:14:14 kind-worker kubelet[241]: E0306 17:14:14.668348 241 kuberuntime_manager.go:841] container &Container{Name:pod-test,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bbm4f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod pod-test_pods-9355(6c8d915b-d32d-4426-a427-7122bd79c947): CreateContainerConfigError: cannot find volume "kube-api-access-bbm4f" to mount into container "pod-test" +#+end_example + +** Volume: kube-api-access-bbm4f + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1368241744696578048/artifacts/logs/kind-worker/kubelet.log | \ + grep "kube-api-access-bbm4f" +#+end_src + +#+RESULTS: +#+begin_example +Mar 06 17:12:06 kind-worker kubelet[241]: I0306 17:12:06.867236 241 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-bbm4f" (UniqueName: 
"kubernetes.io/projected/6c8d915b-d32d-4426-a427-7122bd79c947-kube-api-access-bbm4f") pod "pod-test" (UID: "6c8d915b-d32d-4426-a427-7122bd79c947") +Mar 06 17:14:13 kind-worker kubelet[241]: I0306 17:14:13.925728 241 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-bbm4f" (UniqueName: "kubernetes.io/projected/6c8d915b-d32d-4426-a427-7122bd79c947-kube-api-access-bbm4f") pod "6c8d915b-d32d-4426-a427-7122bd79c947" (UID: "6c8d915b-d32d-4426-a427-7122bd79c947") +Mar 06 17:14:13 kind-worker kubelet[241]: I0306 17:14:13.928640 241 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c8d915b-d32d-4426-a427-7122bd79c947-kube-api-access-bbm4f" (OuterVolumeSpecName: "kube-api-access-bbm4f") pod "6c8d915b-d32d-4426-a427-7122bd79c947" (UID: "6c8d915b-d32d-4426-a427-7122bd79c947"). InnerVolumeSpecName "kube-api-access-bbm4f". PluginName "kubernetes.io/projected", VolumeGidValue "" +Mar 06 17:14:14 kind-worker kubelet[241]: I0306 17:14:14.026790 241 reconciler.go:319] Volume detached for volume "kube-api-access-bbm4f" (UniqueName: "kubernetes.io/projected/6c8d915b-d32d-4426-a427-7122bd79c947-kube-api-access-bbm4f") on node "kind-worker" DevicePath "" +Mar 06 17:14:14 kind-worker kubelet[241]: E0306 17:14:14.668241 241 kubelet_pods.go:161] Mount cannot be satisfied for container "pod-test", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-bbm4f ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Mar 06 17:14:14 kind-worker kubelet[241]: E0306 17:14:14.668348 241 kuberuntime_manager.go:841] container 
&Container{Name:pod-test,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bbm4f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod pod-test_pods-9355(6c8d915b-d32d-4426-a427-7122bd79c947): CreateContainerConfigError: cannot find volume "kube-api-access-bbm4f" to mount into container "pod-test" +Mar 06 17:14:14 kind-worker kubelet[241]: E0306 17:14:14.668374 241 pod_workers.go:191] Error syncing pod 6c8d915b-d32d-4426-a427-7122bd79c947 ("pod-test_pods-9355(6c8d915b-d32d-4426-a427-7122bd79c947)"), skipping: failed to "StartContainer" for "pod-test" with CreateContainerConfigError: "cannot find volume \"kube-api-access-bbm4f\" to mount into container \"pod-test\"" +Mar 06 17:14:14 kind-worker kubelet[241]: E0306 17:14:14.778746 241 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-test.1669d03206b1c99f", GenerateName:"", Namespace:"pods-9355", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"pods-9355", 
Name:"pod-test", UID:"6c8d915b-d32d-4426-a427-7122bd79c947", APIVersion:"v1", ResourceVersion:"24965", FieldPath:"spec.containers{pod-test}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-bbm4f\" to mount into container \"pod-test\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0090bf9a7d54d9f, ext:652185488130, loc:(*time.Location)(0x711e200)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc0090bf9a7d54d9f, ext:652185488130, loc:(*time.Location)(0x711e200)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "pods-9355" not found' (will not retry!) +#+end_example + +* Flake 13: Pod & PodStatus +** Summary + +- https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1368655958795882496 +- namespace: pods-1784 +- Node: kind-worker +- https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1368655958795882496/artifacts/logs/kind-worker/kubelet.log + +** Failure message + +#+begin_example +�[1mSTEP�[0m: creating a Pod with a static label +�[1mSTEP�[0m: watching for Pod to be ready +Mar 7 20:44:07.131: INFO: observed Pod pod-test in namespace pods-1784 in phase Pending with labels: map[test-pod-static:true] & conditions [] +Mar 7 20:44:07.135: INFO: observed Pod pod-test in namespace pods-1784 in phase Pending with labels: map[test-pod-static:true] & conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-03-07 20:44:07 +0000 UTC }] +Mar 7 20:44:14.388: INFO: observed Pod pod-test in namespace pods-1784 in phase Pending with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-03-07 20:44:07 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-03-07 
20:44:07 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-03-07 20:44:07 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-03-07 20:44:07 +0000 UTC }] +Mar 7 20:46:07.132: FAIL: failed to see Pod pod-test in namespace pods-1784 running +Unexpected error: + <*errors.errorString | 0xc00024a250>: { + s: "timed out waiting for the condition", + } + timed out waiting for the condition +occurred +#+end_example + +** Possible cause for the failure + +#+BEGIN_SRC shell :results verbatim :exports both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1368655958795882496/artifacts/logs/kind-worker/kubelet.log | \ + grep "pods-1784" | grep "cannot find volume" | head -1 +#+end_src + +#+RESULTS: +#+begin_example +Mar 07 20:46:14 kind-worker kubelet[244]: E0307 20:46:14.897379 244 kuberuntime_manager.go:844] container &Container{Name:pod-test,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zd68k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod pod-test_pods-1784(d55ad55b-a9c5-4b5e-af60-4c44531286f3): CreateContainerConfigError: cannot find volume "kube-api-access-zd68k" to mount into container "pod-test" +#+end_example + +** Volume: kube-api-access-zd68k + +#+BEGIN_SRC shell :results verbatim :exports 
both +curl https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-kind-ipv6-e2e-parallel/1368655958795882496/artifacts/logs/kind-worker/kubelet.log | \ + grep "kube-api-access-zd68k" +#+end_src + +#+RESULTS: +#+begin_example +Mar 07 20:44:07 kind-worker kubelet[244]: I0307 20:44:07.263072 244 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-zd68k" (UniqueName: "kubernetes.io/projected/d55ad55b-a9c5-4b5e-af60-4c44531286f3-kube-api-access-zd68k") pod "pod-test" (UID: "d55ad55b-a9c5-4b5e-af60-4c44531286f3") +Mar 07 20:46:13 kind-worker kubelet[244]: I0307 20:46:13.347020 244 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-zd68k" (UniqueName: "kubernetes.io/projected/d55ad55b-a9c5-4b5e-af60-4c44531286f3-kube-api-access-zd68k") pod "d55ad55b-a9c5-4b5e-af60-4c44531286f3" (UID: "d55ad55b-a9c5-4b5e-af60-4c44531286f3") +Mar 07 20:46:13 kind-worker kubelet[244]: I0307 20:46:13.349875 244 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d55ad55b-a9c5-4b5e-af60-4c44531286f3-kube-api-access-zd68k" (OuterVolumeSpecName: "kube-api-access-zd68k") pod "d55ad55b-a9c5-4b5e-af60-4c44531286f3" (UID: "d55ad55b-a9c5-4b5e-af60-4c44531286f3"). InnerVolumeSpecName "kube-api-access-zd68k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" +Mar 07 20:46:13 kind-worker kubelet[244]: I0307 20:46:13.448323 244 reconciler.go:319] Volume detached for volume "kube-api-access-zd68k" (UniqueName: "kubernetes.io/projected/d55ad55b-a9c5-4b5e-af60-4c44531286f3-kube-api-access-zd68k") on node "kind-worker" DevicePath "" +Mar 07 20:46:14 kind-worker kubelet[244]: E0307 20:46:14.897228 244 kubelet_pods.go:161] Mount cannot be satisfied for container "pod-test", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-zd68k ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +Mar 07 20:46:14 kind-worker kubelet[244]: E0307 20:46:14.897379 244 kuberuntime_manager.go:844] container &Container{Name:pod-test,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zd68k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod pod-test_pods-1784(d55ad55b-a9c5-4b5e-af60-4c44531286f3): CreateContainerConfigError: cannot find volume "kube-api-access-zd68k" to mount into container "pod-test" +Mar 07 20:46:14 kind-worker kubelet[244]: E0307 20:46:14.897417 244 pod_workers.go:191] Error syncing pod d55ad55b-a9c5-4b5e-af60-4c44531286f3 ("pod-test_pods-1784(d55ad55b-a9c5-4b5e-af60-4c44531286f3)"), skipping: failed to 
"StartContainer" for "pod-test" with CreateContainerConfigError: "cannot find volume \"kube-api-access-zd68k\" to mount into container \"pod-test\"" +Mar 07 20:46:15 kind-worker kubelet[244]: E0307 20:46:15.011549 244 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-test.166a2a5840bc1490", GenerateName:"", Namespace:"pods-1784", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"pods-1784", Name:"pod-test", UID:"d55ad55b-a9c5-4b5e-af60-4c44531286f3", APIVersion:"v1", ResourceVersion:"42275", FieldPath:"spec.containers{pod-test}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-zd68k\" to mount into container \"pod-test\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0096cc5b57bb890, ext:992081372452, loc:(*time.Location)(0x7418700)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc0096cc5b57bb890, ext:992081372452, loc:(*time.Location)(0x7418700)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "pods-1784" not found' (will not retry!) 
+#+end_example diff --git a/research/k8s/tracking-flakes-for-deployment-lifecycle-test.org b/research/k8s/tracking-flakes-for-deployment-lifecycle-test.org new file mode 100644 index 0000000..17eff71 --- /dev/null +++ b/research/k8s/tracking-flakes-for-deployment-lifecycle-test.org @@ -0,0 +1,271 @@ +#+TITLE: Tracking flakes for Deployment Lifecycle Test + + +* Summary + +Locating the root cause of an e2e test flake is hard. +The following looks to provide a clear process by using the [[https://github.com/brianpursley/k8s-e2e-log-combiner][k8s-e2e-log-combiner]] container, which pulls the e2e test logs together into a single sequential file. +Various code blocks will help filter the combined log file into various files for closer inspection. + +* Deployment Lifecycle Test: Overview + +- Test Grid: https://testgrid.k8s.io/sig-release-master-blocking#gce-cos-master-default&include-filter-by-regex=should.run.*lifecycle.*Deployment&width=5 +- e2e test: https://github.com/kubernetes/kubernetes/blob/2f263b24a7aab78c794fb90339c176678de6bf8e/test/e2e/apps/deployment.go#L176 +- [sig-apps] Deployment should run the lifecycle of a Deployment [Conformance] + +* Deployment Lifecycle Test: Success + +Before exploring test flakes it's nice to have a clear list and order of what components the cluster exercised during the test run. +=release-master-blocking/kind-master-parallel= has some test flakes that will be investigated later. +First, select a recent passing prow job. + +** Combine Prow Logs together + +#+BEGIN_SRC shell :results silent :async t +docker run brianpursley/k8s-e2e-log-combiner https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1367182553303224320 > ci-kubernetes-kind-e2e-parallel-1367182553303224320-pass.log +#+END_SRC + +** Filter logs for 'Conformance' + +To help track/note events later we will include a set of line numbers in the resulting log file. 
+ +#+BEGIN_SRC shell :results silent :async t +nl ci-kubernetes-kind-e2e-parallel-1367182553303224320-pass.log | \ + grep -a 'Conformance' > ci-kubernetes-kind-e2e-parallel-1367182553303224320-pass-conformance.log +#+END_SRC + +** Locate the start/end of the test run + +#+BEGIN_SRC shell :results verbatim :exports both +cat ci-kubernetes-kind-e2e-parallel-1367182553303224320-pass-conformance.log | grep -a Deployment | grep -a lifecycle | tail -3 +#+END_SRC + +#+RESULTS: +#+begin_example +200721 19:02:01.402000000 [/build-log.txt] [It] should run the lifecycle of a Deployment [Conformance] +207667 19:02:19.423000000 [/build-log.txt] should run the lifecycle of a Deployment [Conformance] +207670 19:02:19.423000000 [/build-log.txt] {"msg":"PASSED [sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]","total":-1,"completed":8,"skipped":70,"failed":0} +#+end_example + +The above results provide 200721 (start) and 207670 (end) of the test run. +Rounding those numbers to the nearest 5k provides a small buffer around the test to help understand the state of the cluster before/after the test run. + +** Trim log file to Deployment Lifecycle Test run + +#+BEGIN_SRC shell :results silent :async t +nl ci-kubernetes-kind-e2e-parallel-1367182553303224320-pass.log | \ + head -210000 | tail -n +200000 > ci-kubernetes-kind-e2e-parallel-1367182553303224320-pass-deployment-lifecycle.log +#+END_SRC + +A short review of the logs will locate the namespace used by the deployment lifecycle test, =deployment-197= + +** Short summary of deployment lifecycle test + +This filter provides a short log of the steps taken in testing the Deployment lifecycle. 
+ +#+BEGIN_SRC shell :results silent :async t +cat ci-kubernetes-kind-e2e-parallel-1367182553303224320-pass-deployment-lifecycle.log | \ + grep -a "deployment-197" > ci-kubernetes-kind-e2e-parallel-1367182553303224320-pass-deployment-197.log +#+END_SRC + +* Deployment Lifecycle Test: Fail #1 + +Select a recent failing prow job from =release-master-blocking/kind-master-parallel= + +** Combine Prow Logs together + +#+BEGIN_SRC shell :results silent :async t +PROW_URL="https://prow.k8s.io/view/gs/kubernetes-jenkins/logs" +PROW_JOB="ci-kubernetes-kind-e2e-parallel/1366153928210649088" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" +LOG_DIR="/tmp/logs" + +docker run brianpursley/k8s-e2e-log-combiner ${PROW_URL}/${PROW_JOB} > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1.log +#+END_SRC + +** Filter log for 'Conformance' + +To help track/note events later we will include a set of line numbers in the resulting log file. + +#+BEGIN_SRC shell :results silent :async t +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +nl ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1.log | \ + grep -a 'Conformance' > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-conformance.log +#+END_SRC + +** Locate the start/end of the test run + +*** Test Start + +#+BEGIN_SRC shell :results verbatim :exports both +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +cat ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-conformance.log | grep -a "\[It\] should run the lifecycle of a Deployment" | tail -4 +#+END_SRC + +#+RESULTS: +#+begin_example +112105 22:51:10.708000000 [/build-log.txt] [It] should run the lifecycle of a Deployment [Conformance] +482857 23:07:37.019000000 [/build-log.txt] [Fail] [sig-apps] Deployment [It] should run the lifecycle of a Deployment [Conformance]  +#+end_example + + +The above result provides the start of the test run at 112105, rounding to 110,000 + +*** Test End + +#+BEGIN_SRC shell 
:results verbatim :exports both +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +cat ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-conformance.log | grep -a "Deployment should" | grep -a lifecycle | grep -v "PASSED" | tail -10 +#+END_SRC + +#+RESULTS: +#+begin_example +126652 22:52:10.748000000 [/build-log.txt] {"msg":"FAILED [sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]","total":-1,"completed":5,"skipped":50,"failed":1,"failures":["[sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]"]} +#+end_example + +The above result provides the end point in the test run at 126652, rounding to 130,000 + +** Trim log file to Deployment Lifecycle Test run + +#+BEGIN_SRC shell :results silent :async t +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" +nl ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1.log | \ + head -130000 | tail -n +110000 > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle.log +#+END_SRC + +A short review of the logs will locate the namespace used by the deployment lifecycle test, =deployment-2394= + +** Short summary of deployment lifecycle test + +This filter provides a short log of the steps taken in testing the Deployment lifecycle. 
+ +#+BEGIN_SRC shell :results silent :async t +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +cat ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle.log | \ + grep -a "deployment-2394" > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-2394.log +#+END_SRC + +** Filtering logs further +*** Focus on the node running the test + +The test is using node =kind-worker= so let's remove logs for =kind-worker2= + +#+BEGIN_SRC shell :results silent :async t +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +cat ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle.log | \ + grep -v "kind-worker2" > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-no-kind-worker2.log +#+END_SRC + +*** Locate pod details + +#+BEGIN_SRC shell :results silent :async t +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +cat ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-no-kind-worker2.log | \ + grep -a "deployment-2394/test-deployment-" > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-pod-details.log +#+END_SRC + +*** Locate pod events + +#+BEGIN_SRC shell :results silent :async t +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +cat ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-no-kind-worker2.log | \ + grep -a "test-deployment-7778d6bf57-" > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-pod-events.log +#+END_SRC + +** Locate test failures +*** Error: cannot find volume + +#+BEGIN_SRC shell :results verbatim :exports both +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +grep -a "cannot find volume" ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-pod-events.log | tail -1 +#+END_SRC + +#+RESULTS: +#+begin_example +128903 22:52:20.825451000 
[/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.825451 243 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-fqqvk.16680b29cf00e812", GenerateName:"", Namespace:"deployment-2394", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-2394", Name:"test-deployment-7778d6bf57-fqqvk", UID:"dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8", APIVersion:"v1", ResourceVersion:"12891", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-2t44s\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-2394" not found' (will not retry!) 
+#+end_example + +*** Locate volume + +#+BEGIN_SRC shell :results silent :async t +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +grep -a "kube-api-access-2t44s" ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle.log > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-issue-volume.log +#+END_SRC + +*** Locate when the volume was around + +#+BEGIN_SRC shell :results verbatim :exports both +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +head -c 15 ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-issue-volume.log && echo +tail -1 ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-issue-volume.log | head -c 15 && echo +#+END_SRC + +#+RESULTS: +#+begin_example +112951 22:51:11 +128903 22:52:20 +#+end_example + +*** Full filter for volume + +#+BEGIN_SRC shell :results silent :async t +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +nl ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1.log | \ + head -128903 | \ + tail -n +112951 > ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-track-volume-issue.log +#+END_SRC + +*** Volume deleted before pod requests uses it + +#+BEGIN_SRC shell :results verbatim :exports both +LOG_DIR="/tmp/logs" +PROW_JOB_BASE_NAME="ci-kubernetes-kind-e2e-parallel-1366153928210649088" + +cat ${LOG_DIR}/${PROW_JOB_BASE_NAME}-fail1-deployment-lifecycle-track-volume-issue.log | \ + grep "2t44s" +#+END_SRC + +#+RESULTS: +#+begin_example +112951 22:51:11.755606000 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:51:11 kind-worker kubelet[243]: I0228 22:51:11.755606 243 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") pod "test-deployment-7778d6bf57-fqqvk" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8") +112952 
22:51:11.755606000 [/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:51:11 kind-worker kubelet[243]: I0228 22:51:11.755606 243 reconciler.go:224] operationExecutor.VerifyControllerAttachedVolume started for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") pod "test-deployment-7778d6bf57-fqqvk" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8") +126665 22:52:10.761000000 [/build-log.txt] &Pod{ObjectMeta:{test-deployment-7778d6bf57-fqqvk test-deployment-7778d6bf57- deployment-2394 dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8 13303 0 2021-02-28 22:51:10 +0000 UTC map[pod-template-hash:7778d6bf57 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7778d6bf57 60e4eda1-6635-4b70-9862-62db0dfbf84e 0xc001d5e9b7 0xc001d5e9b8}] [] [{kube-controller-manager Update v1 2021-02-28 22:51:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"60e4eda1-6635-4b70-9862-62db0dfbf84e\"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}} {kubelet Update v1 2021-02-28 22:51:21 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-2t44s,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},Volum
eMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2t44s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kind-worker,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-28 22:51:10 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-28 22:51:10 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [test-deployment],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-28 22:51:10 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [test-deployment],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-02-28 22:51:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.18.0.3,PodIP:,StartTime:2021-02-28 22:51:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +128480 22:52:19.542102000 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.542102 243 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") pod "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8") +128481 22:52:19.542102000 [/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.542102 243 reconciler.go:196] operationExecutor.UnmountVolume started for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") pod 
"dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8") +128495 22:52:19.560271652 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:52:19 kind-worker systemd[1]: var-lib-kubelet-pods-dc0a9362\x2dfa20\x2d419e\x2d8ab4\x2d7f2a4f27c9b8-volumes-kubernetes.io\x7eprojected-kube\x2dapi\x2daccess\x2d2t44s.mount: Succeeded. +128498 22:52:19.560997000 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.560997 243 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s" (OuterVolumeSpecName: "kube-api-access-2t44s") pod "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8"). InnerVolumeSpecName "kube-api-access-2t44s". PluginName "kubernetes.io/projected", VolumeGidValue "" +128499 22:52:19.560997000 [/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.560997 243 operation_generator.go:829] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s" (OuterVolumeSpecName: "kube-api-access-2t44s") pod "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8" (UID: "dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8"). InnerVolumeSpecName "kube-api-access-2t44s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" +128566 22:52:19.642678000 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.642678 243 reconciler.go:319] Volume detached for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") on node "kind-worker" DevicePath "" +128567 22:52:19.642678000 [/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:52:19 kind-worker kubelet[243]: I0228 22:52:19.642678 243 reconciler.go:319] Volume detached for volume "kube-api-access-2t44s" (UniqueName: "kubernetes.io/projected/dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8-kube-api-access-2t44s") on node "kind-worker" DevicePath "" +128813 22:52:20.606772000 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606772 243 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-2t44s ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +128814 22:52:20.606772000 [/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606772 243 kubelet_pods.go:159] Mount cannot be satisfied for container "test-deployment", because the volume is missing (ok=false) or the volume mounter (vol.Mounter) is nil (vol={Mounter: BlockVolumeMapper: SELinuxLabeled:false ReadOnly:false InnerVolumeSpecName:}): {Name:kube-api-access-2t44s ReadOnly:true MountPath:/var/run/secrets/kubernetes.io/serviceaccount SubPath: MountPropagation: SubPathExpr:} +128815 22:52:20.606897000 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606897 243 kuberuntime_manager.go:841] container 
&Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2t44s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-fqqvk_deployment-2394(dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8): CreateContainerConfigError: cannot find volume "kube-api-access-2t44s" to mount into container "test-deployment" +128816 22:52:20.606897000 [/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606897 243 kuberuntime_manager.go:841] container &Container{Name:test-deployment,Image:k8s.gcr.io/e2e-test-images/agnhost:2.28,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2t44s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod test-deployment-7778d6bf57-fqqvk_deployment-2394(dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8): CreateContainerConfigError: cannot find volume "kube-api-access-2t44s" to mount into container 
"test-deployment" +128817 22:52:20.606931000 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606931 243 pod_workers.go:191] Error syncing pod dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8 ("test-deployment-7778d6bf57-fqqvk_deployment-2394(dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-2t44s\" to mount into container \"test-deployment\"" +128818 22:52:20.606931000 [/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.606931 243 pod_workers.go:191] Error syncing pod dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8 ("test-deployment-7778d6bf57-fqqvk_deployment-2394(dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8)"), skipping: failed to "StartContainer" for "test-deployment" with CreateContainerConfigError: "cannot find volume \"kube-api-access-2t44s\" to mount into container \"test-deployment\"" +128902 22:52:20.825451000 [/artifacts/logs/kind-worker/journal.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.825451 243 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-fqqvk.16680b29cf00e812", GenerateName:"", Namespace:"deployment-2394", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-2394", Name:"test-deployment-7778d6bf57-fqqvk", UID:"dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8", APIVersion:"v1", ResourceVersion:"12891", 
FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-2t44s\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-2394" not found' (will not retry!) +128903 22:52:20.825451000 [/artifacts/logs/kind-worker/kubelet.log] Feb 28 22:52:20 kind-worker kubelet[243]: E0228 22:52:20.825451 243 event.go:264] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"test-deployment-7778d6bf57-fqqvk.16680b29cf00e812", GenerateName:"", Namespace:"deployment-2394", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"deployment-2394", Name:"test-deployment-7778d6bf57-fqqvk", UID:"dc0a9362-fa20-419e-8ab4-7f2a4f27c9b8", APIVersion:"v1", ResourceVersion:"12891", FieldPath:"spec.containers{test-deployment}"}, Reason:"Failed", Message:"Error: cannot find volume \"kube-api-access-2t44s\" to mount into container \"test-deployment\"", Source:v1.EventSource{Component:"kubelet", Host:"kind-worker"}, 
FirstTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xc0072589242b8012, ext:380893991846, loc:(*time.Location)(0x3e95d80)}}, Count:1, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'namespaces "deployment-2394" not found' (will not retry!) +#+end_example diff --git a/research/k8s/tracking-flakes-for-podstatus-lifecycle-test.org b/research/k8s/tracking-flakes-for-podstatus-lifecycle-test.org new file mode 100644 index 0000000..bcda543 --- /dev/null +++ b/research/k8s/tracking-flakes-for-podstatus-lifecycle-test.org @@ -0,0 +1,81 @@ +#+TITLE: Tracking flakes for PodStatus Lifecycle Test + + +* Summary + +Locating the root cause of an e2e test flake is hard. +The following process looks to provide a clear process by using the [[https://github.com/brianpursley/k8s-e2e-log-combiner][k8s-e2e-log-combiner]] container, which pulls the e2e test logs together into a single sequential file. 
+ +* Pod Status Test + +- e2e test: https://github.com/kubernetes/kubernetes/blob/3c514ae588fdd2867eb9cced1c43c9ee2276a7ce/test/e2e/common/pods.go#L895 +- [It] should run through the lifecycle of Pods and PodStatus [Conformance] + +* PodStatus Succes + +- Kubernetes e2e suite: [k8s.io] Pods should run through the lifecycle of Pods and PodStatus [Conformance] (5sec) + +** Combine Prow Logs + +#+BEGIN_SRC shell :results silent :async t +docker run brianpursley/k8s-e2e-log-combiner https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-kind-e2e-parallel/1366844842662957056 > ci-kubernetes-kind-e2e-parallel-1366844842662957056-pass.log +#+END_SRC + +** Filter logs to test namespace (pods-1311) + +#+BEGIN_SRC shell :results silent :async t +nl ci-kubernetes-kind-e2e-parallel-1366844842662957056-pass.log | grep -a 'pods-5320' | nl > ci-kubernetes-kind-e2e-parallel-1366844842662957056-pass-pods-5320.log +#+END_SRC + +** Passing Test Sequence (filtered summary) + +#+BEGIN_SRC text +eventhandlers.go:164] "Add event for unscheduled pod" pod="pods-5320/pod-test" +eventhandlers.go:164] "Add event for unscheduled pod" pod="pods-5320/pod-test" + +scheduling_queue.go:812] "About to try and schedule pod" pod="pods-5320/pod-test" +scheduling_queue.go:812] "About to try and schedule pod" pod="pods-5320/pod-test" + +scheduler.go:457] "Attempting to schedule pod" pod="pods-5320/pod-test" +scheduler.go:457] "Attempting to schedule pod" pod="pods-5320/pod-test" + +default_binder.go:51] "Attempting to bind pod to node" pod="pods-5320/pod-test" node="kind-worker2" +default_binder.go:51] "Attempting to bind pod to node" pod="pods-5320/pod-test" node="kind-worker2" + +INFO: observed Pod pod-test in namespace pods-5320 in phase Pending with labels: map[test-pod-static:true] & conditions [] +INFO: observed Pod pod-test in namespace pods-5320 in phase Pending with labels: map[test-pod-static:true] & conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-03-02 20:38:46 
+0000 UTC }] + +eventhandlers.go:201] "Delete event for unscheduled pod" pod="pods-5320/pod-test" +eventhandlers.go:201] "Delete event for unscheduled pod" pod="pods-5320/pod-test" +eventhandlers.go:221] "Add event for scheduled pod" pod="pods-5320/pod-test" +eventhandlers.go:221] "Add event for scheduled pod" pod="pods-5320/pod-test" + +scheduler.go:602] "Successfully bound pod to node" pod="pods-5320/pod-test" node="kind-worker2" evaluatedNodes=3 feasibleNodes=2 +scheduler.go:602] "Successfully bound pod to node" pod="pods-5320/pod-test" node="kind-worker2" evaluatedNodes=3 feasibleNodes=2 + +containerd[172]: time="2021-03-02T20:38:48.493614510Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:pod-test,Uid:0b8094cc-283c-4863-94ea-dc4bfe7ed11a,Namespace:pods-5320,Attempt:0,}" +containerd[172]: time="2021-03-02T20:38:48.493614510Z" level=info msg="RunPodsandbox for &PodSandboxMetadata{Name:pod-test,Uid:0b8094cc-283c-4863-94ea-dc4bfe7ed11a,Namespace:pods-5320,Attempt:0,}" +containerd[172]: time="2021-03-02T20:38:48.739386844Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:pod-test,Uid:0b8094cc-283c-4863-94ea-dc4bfe7ed11a,Namespace:pods-5320,Attempt:0,} returns sandbox id \"45e0978f1e8c9fa2179bf1fd9f1a5cfb02a0c2e6a668185b75f0fd8e1c311801\"" +containerd[172]: time="2021-03-02T20:38:48.739386844Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:pod-test,Uid:0b8094cc-283c-4863-94ea-dc4bfe7ed11a,Namespace:pods-5320,Attempt:0,} returns sandbox id \"45e0978f1e8c9fa2179bf1fd9f1a5cfb02a0c2e6a668185b75f0fd8e1c311801\"" + +INFO: Found Pod pod-test in namespace pods-5320 in phase Running with labels: map[test-pod-static:true] & conditions [{Initialized True 0001->305c8.log] + +eventhandlers.go:279] "Delete event for scheduled pod" pod="pods-5320/pod-test" +eventhandlers.go:279] "Delete event for scheduled pod" pod="pods-5320/pod-test" +#+END_SRC +* Test Grid: gce-cos-master-blocking + +- 
https://testgrid.k8s.io/sig-release-master-blocking#gce-cos-master-default&width=5&include-filter-by-regex=should.*lifecycle.*PodStatus + +** Combine Prow Logs + +#+BEGIN_SRC shell :results silent :async t +docker run brianpursley/k8s-e2e-log-combiner https://prow.k8s.io/view/gs/kubernetes-jenkins/logs/ci-kubernetes-e2e-gci-gce/1364703488960892928 > ci-kubernetes-e2e-gci-gce-1364703488960892928-fail.log +#+END_SRC + +** Filter logs to test namespace (pods-1311) + +#+BEGIN_SRC shell :results silent :async t +nl ci-kubernetes-e2e-gci-gce-1364703488960892928-fail.log | grep -a 'pods-1311' | nl > ci-kubernetes-e2e-gci-gce-1364703488960892928-fail-pods-1311.log +#+END_SRC + diff --git a/research/k8sio-ensure-staging-pii-gcs.org b/research/k8sio-ensure-staging-pii-gcs.org new file mode 100644 index 0000000..38aa69f --- /dev/null +++ b/research/k8sio-ensure-staging-pii-gcs.org @@ -0,0 +1,89 @@ +#+TITLE: K8sio Ensure Staging Pii Gcs +This is document to work through +https://github.com/kubernetes/k8s.io/issues/904#issuecomment-859932423 + +Goal: +- Ensure a bucket exist for the logs inside each project +- Enable logging for k8s-staging-* buckets +- Ensure access is gated to limit access per #2031 + +* Investigating what exists +** I see yaml with all staging projects +https://github.com/kubernetes/k8s.io/blob/main/infra/gcp/infra.yaml +** I see the ensure_staging_project() function now contains the simple ensure_staging_gcs_bucket, +https://github.com/kubernetes/k8s.io/blob/77220ac9f954e00a6a4bd854bd74c2566034dab2/infra/gcp/ensure-staging-storage.sh#L107 +** I see the bucket extensions defined: +https://github.com/kubernetes/k8s.io/blob/77220ac9f954e00a6a4bd854bd74c2566034dab2/infra/gcp/ensure-staging-storage.sh#L116 +** I see a role binding used to allow access to logs in prod +# Special case: empower k8s-infra-gcs-access-logs@kubernetes.io to read k8s-artifacts-gcslogs + # k8s-artifacts-gcslogs receive and store Cloud Audit logs for k8s-artificats-prod. 
+ ensure_gcs_role_binding "gs://k8s-artifacts-gcslogs" \ + "group:k8s-infra-gcs-access-logs@kubernetes.io" \ + "objectViewer" +** Enabling this service will allow logging to start. +#+begin_example +readonly PROD_PROJECT_SERVICES=( + # prod projects may perform container analysis + containeranalysis.googleapis.com + # prod projects host containers in GCR + containerregistry.googleapis.com + # prod projects host binaries in GCS + storage-component.googleapis.com +) +#+end_example +** I still need to make sure the logs is private + +* Next steps: +** Add log bucket for each stg project +This looks like updating +https://github.com/kubernetes/k8s.io/blob/77220ac9f954e00a6a4bd854bd74c2566034dab2/infra/gcp/ensure-staging-storage.sh#L116 +to have the log bucket line added +#+begin_example + # The names of the buckets + local staging_bucket="gs://${project}" # used by humans + local staging_log_bucket="gs://${project}-log" # used logs + local gcb_bucket="gs://${project}-gcb" # used by GCB +#+end_example +** Add the services to start logging +Looks like this will be updating +https://github.com/kubernetes/k8s.io/blob/77220ac9f954e00a6a4bd854bd74c2566034dab2/infra/gcp/ensure-staging-storage.sh#L53 +Adding the last 3 container services below +#+begin_example +readonly STAGING_PROJECT_SERVICES=( + # These projects use GCB to build/push images to GCR + cloudbuild.googleapis.com + # Some GCB jobs may use KMS + cloudkms.googleapis.com + # These projects host images in GCR + containerregistry.googleapis.com + # Some GCB jobs may use Secret Manager (preferred over KMS) + secretmanager.googleapis.com + # These projects may host binaries in GCS + storage-component.googleapis.com + # projects may perform container analysis + containeranalysis.googleapis.com + # projects host containers in GCR + containerregistry.googleapis.com + # projects host binaries in GCS + storage-component.googleapis.com +) +#+end_example +** Add roll binding so ii can view +Looks like updating 
+https://github.com/kubernetes/k8s.io/blob/77220ac9f954e00a6a4bd854bd74c2566034dab2/infra/gcp/ensure-staging-storage.sh#L311 +Adding a new entry to the staging_special list +#+begin_example +# Special case: empower k8s-infra-gcs-access-logs@kubernetes.io to read k8s-artifacts-gcslogs + # k8s-artifacts-gcslogs receive and store Cloud Audit logs for k8s-artificats-prod. + ensure_gcs_role_binding "gs://k8s-artifacts-gcslogs" \ + "group:k8s-infra-gcs-access-logs@kubernetes.io" \ + "objectViewer" + +#+end_example +I ended up putting inside the ensure_staging_project so the permission gets set for each bucket +Question: +Would we want to put this in the special_case_func group so we can exclude buckets? +#+begin_example + # Ensure access to audit logs + ensure_gcs_role_binding "gs://${project}" "group:k8s-infra-gcs-access-logs@kubernetes.io" "objectViewer" +#+end_example diff --git a/research/logdata_transformations.org b/research/logdata_transformations.org new file mode 100644 index 0000000..0e4ca01 --- /dev/null +++ b/research/logdata_transformations.org @@ -0,0 +1,106 @@ +#+TITLE: Big Query Scratch +Goal is a file that will be easy starting place for bq work +* Log in to gcloud +Login to gcloud +#+BEGIN_SRC tmate :window prepare +gcloud auth login +#+END_SRC +Set the project +#+BEGIN_SRC tmate :window prepare +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC +* Make sure we are in the right project in bq +#+BEGIN_SRC tmate :window prepare +bq ls +#+END_SRC + +* Data transformation of log data + +**** Goal: +create a `usage_all` table that include all `usage_all` columns plus new culomns for ASN, ASN Company name. Where the ASN exist in https://github.com/kubernetes/k8s.io/tree/main/registry.k8s.io/infra/meta/asns the Company name should be replaced by the name in the `.yaml` file. This is to support the sorting of the ASN companies with `.yaml` file into one group in Data Studio. 
+***** Sub goal: +Add a column for image_name with regex 'REGEXP_EXTRACT(cs_referer,'^https://(?:[^/]+)/+v2/(?:k8s-artifacts-prod/+)*([a-z0-9-/]+)/blobs/sha256:(?:[a-f0-9]+)')' This is however not critical and would be easy to achieve in Data Studio + + +*** 1. Output to a table - Find distinct IPs from `usage_all` data +Extracting distinct IP addresses from the 'usage_all' table. +A count of the number of logs per IP is also added, this is not used later on and is done for interest's sake. +Currently using `bb_test.usage_all` table. Must be changed when the automated 'usage' table is available + +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.1_etl_test_distinct_ip_org 'SELECT DISTINCT c_ip, COUNT(c_ip) AS Total_Count FROM `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.usage_all` GROUP BY c_ip ORDER BY Total_Count DESC' +#+END_SRC + + + +*** 2. Output to a table - Change distinct IP to int. +To be able to look up ASN data the list of distinct IPs is converted to integers. + +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.2_etl_test_distinct_ip_and_int_org 'SELECT *, NET.IPV4_TO_INT64(NET.IP_FROM_STRING(c_ip)) AS c_ip_int FROM `k8s-infra-ii-sandbox.riaan_data_store.1_etl_test_distinct_ip_org` WHERE REGEXP_CONTAINS(c_ip, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}")' +#+END_SRC + + + +*** 3. Output to a table - Join potaroo data with .yaml names +To be able to group the companies with .yaml files in the K8s/infra/meta/asn folder, the names are added to the potaroo table in a new column. +Using `etl_script_generated_set_riaan.potaroo_all_asn_name` must be moved to `etl_script_generated_set.potaroo_all_asn_name` when it is available.
+ + + +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.3_etl_test_potaroo_with_yaml_names_org 'SELECT asn, companyname, name_yaml FROM ( SELECT asn, companyname FROM `k8s-infra-ii-sandbox.etl_script_generated_set_riaan.potaroo_all_asn_name`) A LEFT OUTER JOIN ( SELECT asn_yaml, name_yaml FROM `k8s-infra-ii-sandbox.etl_staging.k8s_repo_yaml_20210609`) B ON A.asn=B.asn_yaml' +#+END_SRC + + + +*** 4. Output to a table - Join potaroo and yaml name into one column +This step joins the potaroo company name table and the .yaml name table into a single column. +With more advanced SQL-foo it could be combined with the previous step. + +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.4_etl_test_potaroo_with_yaml_names_in_one_column_org 'SELECT A.asn, A.companyname, case when name_yaml is not null then name_yaml else B.companyname end as name_with_yaml_name FROM ( SELECT asn, companyname FROM `k8s-infra-ii-sandbox.riaan_data_store.3_etl_test_potaroo_with_yaml_names_org`) A LEFT JOIN ( SELECT asn, companyname, name_yaml FROM `k8s-infra-ii-sandbox.riaan_data_store.3_etl_test_potaroo_with_yaml_names_org`) B ON A.asn=B.asn' +#+END_SRC + + + +*** 5. Output to a table - Join vendor with company name +The `vendor` table has the start and end IPs in 'int' format and the ASN number. This step adds the company names from the updated potaroo table. +Currently using `etl_script_generated_set_riaan.vendor`; should move to `etl_script_generated_set.vendor` when it is available.
+ + + +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.5_etl_test_vendor_with_company_name_org 'SELECT A.asn, cidr_ip, start_ip, end_ip, start_ip_int, end_ip_int,name_with_yaml_name FROM ( SELECT asn, cidr_ip, start_ip, end_ip, start_ip_int, end_ip_int FROM `k8s-infra-ii-sandbox.etl_script_generated_set_riaan.vendor`) A LEFT OUTER JOIN ( SELECT asn, name_with_yaml_name FROM `k8s-infra-ii-sandbox.riaan_data_store.4_etl_test_potaroo_with_yaml_names_in_one_column_org`) B ON A.asn=B.asn' +#+END_SRC + + + + +*** 6. Output to a table - Join ASN data to distinct IPs +This step uses the updated vendor table with company names to add 'company name' and 'asn number' to the list of distinct IP addresses. +There is insufficient BQ resource to complete this query. Tested it with 'LIMIT 1000' + +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.6_etl_test_join_asn_and_distinct_ip_org 'SELECT c_ip, Total_Count, c_ip_int,asn, name_with_yaml_name FROM`k8s-infra-ii-sandbox.riaan_data_store.2_etl_test_distinct_ip_and_int_org`, `k8s-infra-ii-sandbox.riaan_data_store.5_etl_test_vendor_with_company_name_org` WHERE c_ip_int >=start_ip_int AND c_ip_int <=end_ip_int LIMIT 1000' +#+END_SRC + + + + +*** 7. Output to a table - Join the IP / ASN / Company name with the `usage_all` table +This step updates the `usage_all` with company name and asn from the `distinct IP table + company name + asn` table. +There is insufficient BQ resource to complete this query.
Tested it with `LIMIT 1000` +Using `etl_staging.usage_all_20210608` must move to autogenerated `usage_all` when avalible + +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.7_etl_test_join_usage_all_with_asn_and_company_name_org 'SELECT time_micros, A.c_ip, c_ip_type, c_ip_region, cs_method, cs_uri, sc_status, cs_bytes, sc_bytes, time_taken_micros, cs_host, cs_referer, cs_user_agent, s_request_id, cs_operation, cs_bucket, cs_object, asn, name_with_yaml_name FROM ( SELECT time_micros, c_ip, c_ip_type, c_ip_region, cs_method, cs_uri, sc_status, cs_bytes, sc_bytes, time_taken_micros, cs_host, cs_referer, cs_user_agent, s_request_id, cs_operation, cs_bucket, cs_object FROM `k8s-infra-ii-sandbox.etl_staging.usage_all_20210608` ) A LEFT OUTER JOIN ( SELECT asn, c_ip,name_with_yaml_name FROM `k8s-infra-ii-sandbox.riaan_data_store.6_etl_test_join_asn_and_distinct_ip_org`) B ON A.c_ip=B.c_ip LIMIT 1000 ' +#+END_SRC + + +*** 8. Transform cs_referer to image name - Bonus step +Work in BQ GUI, fail in .org: `syntax error near unexpected token `(' ` + +#+BEGIN_SRC tmate :window prepare +bq query --nouse_legacy_sql --destination_table riaan_data_store.8_etl_test_join_usage_all_with_asn_and_company_name_and_image_name_org 'SELECT *, REGEXP_EXTRACT (cs_referer,'^https://(?:[^/]+)/+v2/(?:k8s-artifacts-prod/+)*([a-z0-9-/]+)/blobs/sha256:(?:[a-f0-9]+)') AS image_name FROM `k8s-infra-ii-sandbox.riaan_data_store.7_etl_test_join_usage_all_with_asn_and_company_name_org` ' +#+END_SRC diff --git a/research/mailserver.org b/research/mailserver.org new file mode 100644 index 0000000..b82702b --- /dev/null +++ b/research/mailserver.org @@ -0,0 +1,223 @@ +#+TITLE: Mailserver + +https://github.com/cyrusimap/cyrus-imapd + +* Group-Office +#+begin_SRC yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: dbdata +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: Secret 
+metadata: + name: db-env +stringData: + MYSQL_HOST: mariadb + MYSQL_DATABASE: groupoffice + MYSQL_PASSWORD: groupoffice + MYSQL_USER: groupoffice +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: mariadb + name: db +spec: + ports: + - name: "mariadb" + port: 3306 + targetPort: 3306 + selector: + app: mariadb +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: mariadb + name: mariadb +spec: + serviceName: "mariadb" + replicas: 1 + selector: + matchLabels: + app: mariadb + template: + metadata: + labels: + app: mariadb + spec: + containers: + - env: + - name: MYSQL_DATABASE + valueFrom: + secretKeyRef: + key: MYSQL_DATABASE + name: db-env + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + key: MYSQL_PASSWORD + name: db-env + - name: MYSQL_ROOT_PASSWORD + value: groupoffice + - name: MYSQL_USER + valueFrom: + secretKeyRef: + key: MYSQL_USER + name: db-env + image: mariadb:10.6.4 + name: mariadb + resources: {} + ports: + - containerPort: 3306 + volumeMounts: + - mountPath: /var/lib/mysql + name: dbdata + restartPolicy: Always + volumes: + - name: dbdata + persistentVolumeClaim: + claimName: dbdata +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: godata +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: goetc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: groupoffice + name: groupoffice +spec: + replicas: 1 + selector: + matchLabels: + app: groupoffice + strategy: + type: Recreate + template: + metadata: + labels: + app: groupoffice + spec: + containers: + - env: + - name: MYSQL_DATABASE + valueFrom: + secretKeyRef: + key: MYSQL_DATABASE + name: db-env + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + key: MYSQL_PASSWORD + name: db-env + - name: MYSQL_USER + valueFrom: + secretKeyRef: + key: 
MYSQL_USER + name: db-env + image: intermesh/groupoffice:latest + name: groupoffice + command: + - /bin/sh + - -x + - -c + - | + set -ex + ls -alh /etc/groupoffice + cp /usr/local/share/groupoffice/go/modules/community/multi_instance/config.php.tpl /etc/groupoffice/config.php + chown -R www-data:www-data /var/lib/groupoffice + + sed -i 's/{dbHost}/'${MYSQL_HOST}'/' /etc/groupoffice/config.php + sed -i 's/{dbName}/'${MYSQL_DATABASE}'/' /etc/groupoffice/config.php + sed -i 's/{dbUsername}/'${MYSQL_USER}'/' /etc/groupoffice/config.php + sed -i 's/{dbPassword}/'${MYSQL_PASSWORD}'/' /etc/groupoffice/config.php + sed -i 's,{dataPath},/var/lib/groupoffice,' /etc/groupoffice/config.php + sed -i 's,{tmpPath},/tmp/groupoffice,' /etc/groupoffice/config.php + + #call original entry point + docker-php-entrypoint apache2-foreground + ports: + - containerPort: 80 + resources: {} + volumeMounts: + - mountPath: /var/lib/groupoffice + name: godata + - mountPath: /etc/groupoffice + name: goetc + restartPolicy: Always + volumes: + - name: godata + persistentVolumeClaim: + claimName: godata + - name: goetc + persistentVolumeClaim: + claimName: goetc +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: groupoffice + name: groupoffice +spec: + ports: + - name: "http" + port: 80 + targetPort: 80 + selector: + app: groupoffice +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + labels: + app: groupoffice + name: groupoffice +spec: + rules: + - host: groupoffice.bobymcbobs.pair.sharing.io + http: + paths: + - backend: + service: + name: groupoffice + port: + number: 80 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - groupoffice.bobymcbobs.pair.sharing.io + secretName: letsencrypt-prod +#+end_SRC diff --git a/research/mitmproxy.org b/research/mitmproxy.org new file mode 100644 index 0000000..de5b766 --- /dev/null +++ b/research/mitmproxy.org @@ -0,0 +1,43 @@ +#+TITLE: mitmproxy with Podman + +* Setting up +Bringing mitmproxy up with Podman (or 
Docker): +#+BEGIN_SRC tmate :window mitm +podman run -it --rm -p 8080:8080 --name mitmproxy mitmproxy/mitmproxy +#+END_SRC + +Copy the CA out of the container: +#+BEGIN_SRC tmate :window shell +podman cp mitmproxy:/home/mitmproxy/.mitmproxy/mitmproxy-ca.pem /tmp/mitmproxy-ca.pem +#+END_SRC + +Copy the CA into the ca-trust: +#+BEGIN_SRC tmate :window shell +sudo mv /tmp/mitmproxy-ca.pem /etc/pki/ca-trust/source/anchors/mitmproxy-ca.pem +#+END_SRC + +Update the CA trust cache: +#+BEGIN_SRC tmate :window shell +sudo update-ca-trust +#+END_SRC + +* Usage +Pull a container image: +#+BEGIN_SRC tmate :window shell +HTTPS_PROXY=http://localhost:8080 podman pull k8s.gcr.io/pause:3.2 +#+END_SRC + +Going back to the /mitm/ window, you will now see requests as they come through. + +* Clean up +Delete the container image tag +#+BEGIN_SRC tmate :window shell +podman rmi k8s.gcr.io/pause:3.2 +#+END_SRC + +Remove the CA from the ca-trust and update CA the trust cache: +#+BEGIN_SRC tmate :window shell +sudo rm /etc/pki/ca-trust/source/anchors/mitmproxy-ca.pem +sudo update-ca-trust +podman rm -f mitmproxy +#+END_SRC diff --git a/research/mute-when-typing.md b/research/mute-when-typing.md new file mode 100644 index 0000000..f0d711f --- /dev/null +++ b/research/mute-when-typing.md @@ -0,0 +1,133 @@ +- [Problem](#sec-1) +- [Solution](#sec-2) + - [Define keyboard and audio source](#sec-2-1) + - [in a temp folder, touch a keypress file on every keypress](#sec-2-2) + - [create mute file when the keypress file is updated](#sec-2-3) + - [when the mute file is delete unmute](#sec-2-4) + - [when the mute file is create mute](#sec-2-5) + - [remove the mutefile if there hasn't been keystrokes in a while](#sec-2-6) + - [Wait for all those async processes to exit](#sec-2-7) + + +# Problem + +While on meetings my keyoard is really loud! + +So let's solve that by muting my microphone while I'm typing! 
+ +# Solution + +## Define keyboard and audio source + +```sh +# Find via `xinput --list --name-only` +KEYBOARD="Kinesis Advantage2 Keyboard" +# Find via `pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep input` +AUDIO_SOURCE="alsa_input.usb-Plantronics_Plantronics_Savi_7xx-M-00.analog-mono" +AUDIO_SOURCE="alsa_input.usb-0b0e_Jabra_SPEAK_510_USB_501AA5D89CCB020A00-00.analog-mono" +# touched on keypress +KEYPRESS_FILE=keypress +# exists while recent keypres +MUTE_FILE=mute +``` + +## in a temp folder, touch a keypress file on every keypress + +```sh +cd $(mktemp -d) +# set -x +# set -e +( +while read keypress; do + touch $KEYPRESS_FILE +done < <(xinput test-xi2 --root "$KEYBOARD") +) 2>&1 & +``` + +## create mute file when the keypress file is updated + +```sh +( + while read file; do + if [ $file == $KEYPRESS_FILE ] ; then + touch $MUTE_FILE + fi + done < <(inotifywait -e create,attrib,modify --format '%f' --quiet . --monitor) +) 2>&1 & +``` + +## when the mute file is delete unmute + +```sh +( + while read file; do + if [ $file == $MUTE_FILE ] ; then + echo "UNMUTING" + pactl set-source-mute $AUDIO_SOURCE 0 + # mute with alsa + # amixer -D pulse sset Capture cap + # mute everything with pactl + # pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep -v monitor \ + # | xargs -n 1 -I X pactl set-source-mute X 0 + # get the currenty active window, send alt+a to mute-unmute + # aw=$(xdotool getactivewindow) + # xdotool search --name 'Zoom Meeting ID: .*' \ + # windowactivate --sync \ + # key alt+a \ + # windowactivate $aw + fi + done < <(inotifywait -e delete --format '%f' --quiet . 
--monitor) +) 2>&1 & +``` + +## when the mute file is create mute + +```sh +( + while read file; do + if [ $file == $MUTE_FILE ] ; then + echo "MUTING" + pactl set-source-mute $AUDIO_SOURCE 1 + # amixer -D pulse sset Capture nocap + # unmute everything with pactl + # pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep -v monitor \ + # | xargs -n 1 -I X pactl set-source-mute X 1 + # aw=$(xdotool getactivewindow) + # xdotool search --name 'Zoom Meeting ID: .*' \ + # windowactivate --sync \ + # key alt+a \ + # windowactivate $aw + fi + done < <(inotifywait -e create --format '%f' --quiet . --monitor) +) 2>&1 & +``` + +## remove the mutefile if there hasn't been keystrokes in a while + +```sh +( + while true ; do + if [ ! -f $KEYPRESS_FILE ] ; then + sleep 0.1 + continue + elif [ ! -f $MUTE_FILE ] ; then + sleep 0.1 + continue + fi + LAST_KEYSTROKE_TIME=$(ls -l --time-style=+%H%M%S%N $KEYPRESS_FILE | awk '{print $6}') + CURRENT_TIME=$(date +%H%M%S%N) + TIME_SINCE_LAST_KEYSTROKE=$(($CURRENT_TIME - $LAST_KEYSTROKE_TIME)) + if [ $TIME_SINCE_LAST_KEYSTROKE -ge 200000000 ] ; then + if [ -f $MUTE_FILE ] ; then + rm $MUTE_FILE && sleep 0.1 && rm -f $MUTE_FILE + fi + fi + done +) 2>&1 & +``` + +## Wait for all those async processes to exit + +```sh +wait +``` diff --git a/research/mute-when-typing.org b/research/mute-when-typing.org new file mode 100644 index 0000000..0007e3a --- /dev/null +++ b/research/mute-when-typing.org @@ -0,0 +1,119 @@ +#+TITLE: Mute While Typing + +* Problem + +While on meetings my keyoard is really loud! + +So let's solve that by muting my microphone while I'm typing! 
+ +* Solution + +** Define keyboard and audio source + +#+begin_src sh :tangle yes + # Find via `xinput --list --name-only` + KEYBOARD="Kinesis Advantage2 Keyboard" + # Find via `pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep input` + AUDIO_SOURCE="alsa_input.usb-Plantronics_Plantronics_Savi_7xx-M-00.analog-mono" + AUDIO_SOURCE="alsa_input.usb-0b0e_Jabra_SPEAK_510_USB_501AA5D89CCB020A00-00.analog-mono" + # touched on keypress + KEYPRESS_FILE=keypress + # exists while recent keypres + MUTE_FILE=mute +#+end_src + +** in a temp folder, touch a keypress file on every keypress + +#+begin_src sh :tangle yes + cd $(mktemp -d) + # set -x + # set -e + ( + while read keypress; do + touch $KEYPRESS_FILE + done < <(xinput test-xi2 --root "$KEYBOARD") + ) 2>&1 & +#+end_src + +** create mute file when the keypress file is updated + +#+begin_src sh :tangle yes + ( + while read file; do + if [ $file == $KEYPRESS_FILE ] ; then + touch $MUTE_FILE + fi + done < <(inotifywait -e create,attrib,modify --format '%f' --quiet . --monitor) + ) 2>&1 & +#+end_src +** when the mute file is delete unmute +#+begin_src sh :tangle yes + ( + while read file; do + if [ $file == $MUTE_FILE ] ; then + echo "UNMUTING" + pactl set-source-mute $AUDIO_SOURCE 0 + # mute with alsa + # amixer -D pulse sset Capture cap + # mute everything with pactl + # pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep -v monitor \ + # | xargs -n 1 -I X pactl set-source-mute X 0 + # get the currenty active window, send alt+a to mute-unmute + # aw=$(xdotool getactivewindow) + # xdotool search --name 'Zoom Meeting ID: .*' \ + # windowactivate --sync \ + # key alt+a \ + # windowactivate $aw + fi + done < <(inotifywait -e delete --format '%f' --quiet . 
--monitor) + ) 2>&1 & +#+end_src + +** when the mute file is create mute +#+begin_src sh :tangle yes + ( + while read file; do + if [ $file == $MUTE_FILE ] ; then + echo "MUTING" + pactl set-source-mute $AUDIO_SOURCE 1 + # amixer -D pulse sset Capture nocap + # unmute everything with pactl + # pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep -v monitor \ + # | xargs -n 1 -I X pactl set-source-mute X 1 + # aw=$(xdotool getactivewindow) + # xdotool search --name 'Zoom Meeting ID: .*' \ + # windowactivate --sync \ + # key alt+a \ + # windowactivate $aw + fi + done < <(inotifywait -e create --format '%f' --quiet . --monitor) + ) 2>&1 & +#+end_src +** remove the mutefile if there hasn't been keystrokes in a while +#+begin_src sh :tangle yes + ( + while true ; do + if [ ! -f $KEYPRESS_FILE ] ; then + sleep 0.1 + continue + elif [ ! -f $MUTE_FILE ] ; then + sleep 0.1 + continue + fi + LAST_KEYSTROKE_TIME=$(ls -l --time-style=+%H%M%S%N $KEYPRESS_FILE | awk '{print $6}') + CURRENT_TIME=$(date +%H%M%S%N) + TIME_SINCE_LAST_KEYSTROKE=$(($CURRENT_TIME - $LAST_KEYSTROKE_TIME)) + if [ $TIME_SINCE_LAST_KEYSTROKE -ge 200000000 ] ; then + if [ -f $MUTE_FILE ] ; then + rm $MUTE_FILE && sleep 0.1 && rm -f $MUTE_FILE + fi + fi + done + ) 2>&1 & +#+end_src + +** Wait for all those async processes to exit + +#+begin_src sh :tangle yes + wait +#+end_src diff --git a/research/mute-when-typing.sh b/research/mute-when-typing.sh new file mode 100644 index 0000000..6a0b25c --- /dev/null +++ b/research/mute-when-typing.sh @@ -0,0 +1,86 @@ +# Find via `xinput --list --name-only` +KEYBOARD="Kinesis Advantage2 Keyboard" +# Find via `pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep input` +AUDIO_SOURCE="alsa_input.usb-Plantronics_Plantronics_Savi_7xx-M-00.analog-mono" +AUDIO_SOURCE="alsa_input.usb-0b0e_Jabra_SPEAK_510_USB_501AA5D89CCB020A00-00.analog-mono" +# touched on keypress +KEYPRESS_FILE=keypress +# exists while recent keypres +MUTE_FILE=mute + +cd $(mktemp -d) +# 
set -x +# set -e +( +while read keypress; do + touch $KEYPRESS_FILE +done < <(xinput test-xi2 --root "$KEYBOARD") +) 2>&1 & + +( + while read file; do + if [ $file == $KEYPRESS_FILE ] ; then + touch $MUTE_FILE + fi + done < <(inotifywait -e create,attrib,modify --format '%f' --quiet . --monitor) +) 2>&1 & + +( + while read file; do + if [ $file == $MUTE_FILE ] ; then + echo "UNMUTING" + pactl set-source-mute $AUDIO_SOURCE 0 + # mute with alsa + # amixer -D pulse sset Capture cap + # mute everything with pactl + # pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep -v monitor \ + # | xargs -n 1 -I X pactl set-source-mute X 0 + # get the currenty active window, send alt+a to mute-unmute + # aw=$(xdotool getactivewindow) + # xdotool search --name 'Zoom Meeting ID: .*' \ + # windowactivate --sync \ + # key alt+a \ + # windowactivate $aw + fi + done < <(inotifywait -e delete --format '%f' --quiet . --monitor) +) 2>&1 & + +( + while read file; do + if [ $file == $MUTE_FILE ] ; then + echo "MUTING" + pactl set-source-mute $AUDIO_SOURCE 1 + # amixer -D pulse sset Capture nocap + # unmute everything with pactl + # pactl list sources | grep Name: | awk -F:\ '{print $2}' | grep -v monitor \ + # | xargs -n 1 -I X pactl set-source-mute X 1 + # aw=$(xdotool getactivewindow) + # xdotool search --name 'Zoom Meeting ID: .*' \ + # windowactivate --sync \ + # key alt+a \ + # windowactivate $aw + fi + done < <(inotifywait -e create --format '%f' --quiet . --monitor) +) 2>&1 & + +( + while true ; do + if [ ! -f $KEYPRESS_FILE ] ; then + sleep 0.1 + continue + elif [ ! 
-f $MUTE_FILE ] ; then + sleep 0.1 + continue + fi + LAST_KEYSTROKE_TIME=$(ls -l --time-style=+%H%M%S%N $KEYPRESS_FILE | awk '{print $6}') + CURRENT_TIME=$(date +%H%M%S%N) + TIME_SINCE_LAST_KEYSTROKE=$(($CURRENT_TIME - $LAST_KEYSTROKE_TIME)) + if [ $TIME_SINCE_LAST_KEYSTROKE -ge 200000000 ] ; then + if [ -f $MUTE_FILE ] ; then + rm $MUTE_FILE && sleep 0.1 && rm -f $MUTE_FILE + fi + fi + done +) 2>&1 & + +wait diff --git a/research/netboot/.kubemacs/cluster-role-binding.yaml b/research/netboot/.kubemacs/cluster-role-binding.yaml new file mode 100644 index 0000000..72c47d1 --- /dev/null +++ b/research/netboot/.kubemacs/cluster-role-binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubemacs-crb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: kubemacs-sa + # TODO figure out how to set the sa namespace via kustomize + namespace: ii diff --git a/research/netboot/.kubemacs/configuration.yaml b/research/netboot/.kubemacs/configuration.yaml new file mode 100644 index 0000000..dcffdbe --- /dev/null +++ b/research/netboot/.kubemacs/configuration.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kubemacs-configuration +data: + TZ: "Pacific/Auckland" + GIT_EMAIL: hh@ii.coop + GIT_NAME: Hippie Hacker + INIT_DEFAULT_REPO: https://github.com/cncf/apisnoop.git + INIT_DEFAULT_DIR: /home/ii/apisnoop + INIT_ORG_FILE: /home/ii/apisnoop/org/tickets/mock-template.org diff --git a/research/netboot/.kubemacs/kind-cluster-config.yaml b/research/netboot/.kubemacs/kind-cluster-config.yaml new file mode 100644 index 0000000..af048ef --- /dev/null +++ b/research/netboot/.kubemacs/kind-cluster-config.yaml @@ -0,0 +1,86 @@ +# kind-cluster-config.yaml +# #+NAME: kind kubeadm DynamicAuditing configuration + +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +kubeadmConfigPatches: +- | + apiVersion: kubeadm.k8s.io/v1beta2 + 
kind: ClusterConfiguration + metadata: + name: config + apiServer: + extraArgs: + "feature-gates": "DynamicAuditing=true" + "runtime-config": "auditregistration.k8s.io/v1alpha1=true" + "audit-dynamic-configuration": "true" +kubeadmConfigPatchesJSON6902: +- group: kubeadm.k8s.io + version: v1beta2 + kind: ClusterConfiguration + patch: | + - op: add + path: /apiServer/certSANs/- + value: '*.kubemacs.org' + - op: add + path: /apiServer/certSANs/- + value: '*.ii.nz' + - op: add + path: /apiServer/certSANs/- + value: '*.ii.coop' + - op: add + path: /apiServer/certSANs/- + value: '*.sharing.io' +nodes: + - role: control-plane + extraMounts: + # - containerPath: /var/local-path-provisioner + # hostPath: /tmp/workspace/pvcs + # readOnly: False + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + readOnly: False + - containerPath: /var/host/tmp + hostPath: /tmp + readOnly: False + extraPortMappings: + - containerPort: 80 + hostPort: 80 + - containerPort: 443 + hostPort: 443 + # dev tmate target port + - containerPort: 2200 + hostPort: 2200 + # - containerPort: 4000 + # hostPort: 4000 + # - containerPort: 4001 + # hostPort: 4001 + - containerPort: 5432 + hostPort: 5432 + - containerPort: 6443 + hostPort: 6443 + - containerPort: 10350 + hostPort: 10350 + # - containerPort: 80 + # hostPort: 2080 + # - containerPort: 443 + # hostPort: 20443 + kubeadmConfigPatches: + - | + apiVersion: kubeadm.k8s.io/v1beta2 + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + authorization-mode: "AlwaysAllow" + - role: worker + extraMounts: + # - containerPath: /var/local-path-provisioner + # hostPath: /tmp/workspace/pvcs + # readOnly: False + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + readOnly: False + - containerPath: /var/host/tmp + hostPath: /tmp + readOnly: False diff --git a/research/netboot/.kubemacs/kustomization.yaml b/research/netboot/.kubemacs/kustomization.yaml new file mode 100644 
index 0000000..1286dc6 --- /dev/null +++ b/research/netboot/.kubemacs/kustomization.yaml @@ -0,0 +1,19 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - configuration.yaml + - statefulset.yaml + - service-account.yaml + - cluster-role-binding.yaml + - tilt-service.yaml +configMapGenerator: +- name: kubemacs-configuration + behavior: merge + literals: + - TZ=Pacific/Auckland + - GIT_EMAIL=ii@ii.coop + - GIT_NAME=Hippie Hopper + - INIT_ORG_FILE=~/apisnoop/deployments/k8s/xip.io/README.org:37 +# configMapGenerator: +# - name: kubemacs-options +# env: kubemacs-options diff --git a/research/netboot/.kubemacs/service-account.yaml b/research/netboot/.kubemacs/service-account.yaml new file mode 100644 index 0000000..d8ebb59 --- /dev/null +++ b/research/netboot/.kubemacs/service-account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kubemacs-sa diff --git a/research/netboot/.kubemacs/statefulset.yaml b/research/netboot/.kubemacs/statefulset.yaml new file mode 100644 index 0000000..0d2af66 --- /dev/null +++ b/research/netboot/.kubemacs/statefulset.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kubemacs +spec: + replicas: 1 + serviceName: "kubemacs" + selector: + matchLabels: + app: kubemacs + template: + metadata: + labels: + app: kubemacs + spec: + serviceAccountName: kubemacs-sa + containers: + - name: kubemacs + image: gcr.io/apisnoop/kubemacs:0.9.33 + command: + - /usr/local/bin/simple-init.sh + - "$INIT_ORG_FILE" + env: + - name: TZ + valueFrom: + configMapKeyRef: + name: kubemacs-configuration + key: TZ + - name: GIT_COMMITTER_EMAIL + valueFrom: + configMapKeyRef: + name: kubemacs-configuration + key: GIT_EMAIL + - name: GIT_COMMITTER_NAME + valueFrom: + configMapKeyRef: + name: kubemacs-configuration + key: GIT_NAME + - name: GIT_AUTHOR_EMAIL + valueFrom: + configMapKeyRef: + name: kubemacs-configuration + key: GIT_EMAIL + - name: GIT_AUTHOR_NAME + valueFrom: + 
configMapKeyRef: + name: kubemacs-configuration + key: GIT_NAME + - name: INIT_DEFAULT_REPO + valueFrom: + configMapKeyRef: + name: kubemacs-configuration + key: INIT_DEFAULT_REPO + - name: INIT_DEFAULT_DIR + valueFrom: + configMapKeyRef: + name: kubemacs-configuration + key: INIT_DEFAULT_DIR + - name: INIT_ORG_FILE + valueFrom: + configMapKeyRef: + name: kubemacs-configuration + key: INIT_ORG_FILE + volumeMounts: + - mountPath: '/home/ii/workspace' + name: kubemacs-hostpath + - name: docker + mountPath: /var/run/docker.sock + - name: host-tmp + mountPath: /tmp + volumes: + - name: kubemacs-hostpath + hostPath: + path: /workspace + - name: docker + hostPath: + path: /var/run/docker.sock + type: Socket + - name: host-tmp + hostPath: + path: /var/host/tmp + diff --git a/research/netboot/.kubemacs/tilt-service.yaml b/research/netboot/.kubemacs/tilt-service.yaml new file mode 100644 index 0000000..25a73e6 --- /dev/null +++ b/research/netboot/.kubemacs/tilt-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: kubemacs-tilt +spec: + type: ClusterIP + selector: + app: kubemacs + ports: + - name: "10350" + port: 10350 + targetPort: 10350 diff --git a/research/netboot/Tiltfile b/research/netboot/Tiltfile new file mode 100644 index 0000000..5289e8a --- /dev/null +++ b/research/netboot/Tiltfile @@ -0,0 +1,4 @@ +# -*- mode: python; -*- +k8s_yaml(kustomize('.')) +docker_build('ii/dnsmasq', 'dnsmasq') +allow_k8s_contexts('in-cluster') diff --git a/research/netboot/dnsmasq/Dockerfile b/research/netboot/dnsmasq/Dockerfile new file mode 100644 index 0000000..97b723e --- /dev/null +++ b/research/netboot/dnsmasq/Dockerfile @@ -0,0 +1,22 @@ +# https://github.com/jpillora/docker-dnsmasq/blob/master/Dockerfile + +FROM alpine:edge +LABEL maintainer="dev@jpillora.com" +# webproc release settings +ENV WEBPROC_VERSION 0.2.2 +ENV WEBPROC_URL https://github.com/jpillora/webproc/releases/download/$WEBPROC_VERSION/webproc_linux_amd64.gz +# fetch dnsmasq and webproc 
binary +RUN apk update \ + && apk --no-cache add dnsmasq tcpdump git \ + && apk add --no-cache --virtual .build-deps curl \ + && curl -sL $WEBPROC_URL | gzip -d - > /usr/local/bin/webproc \ + && chmod +x /usr/local/bin/webproc \ + && apk del .build-deps +#configure dnsmasq +RUN mkdir -p /etc/default/ +RUN echo -e "ENABLED=1\nIGNORE_RESOLVCONF=yes" > /etc/default/dnsmasq +RUN git clone --depth 1 https://github.com/Hexxeh/rpi-firmware.git \ + && mv rpi-firmware /tftpboot +COPY dnsmasq.conf /etc/dnsmasq.conf +#run! +ENTRYPOINT ["webproc","--config","/etc/dnsmasq.conf","--","dnsmasq","--no-daemon"] diff --git a/research/netboot/dnsmasq/deployment.yaml b/research/netboot/dnsmasq/deployment.yaml new file mode 100644 index 0000000..778a2e9 --- /dev/null +++ b/research/netboot/dnsmasq/deployment.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dnsmasq +spec: + replicas: 1 + selector: + matchLabels: + coop.ii: dnsmasq + template: + metadata: + labels: + coop.ii: dnsmasq + spec: + allowPrivilegeEscalation: true + allowedCapabilities: + - NET_ADMIN + - NET_RAW + - SYS_ADMIN + hostNetwork: true + hostPorts: + - max: 7472 + min: 7472 + privileged: true + spec: + containers: + - name: dnsmasq + image: "ii/dnsmasq" + ports: + - containerPort: 67 + hostPort: 67 + protocol: UDP + - containerPort: 69 + hostPort: 69 + protocol: UDP diff --git a/research/netboot/dnsmasq/dnsmasq.conf b/research/netboot/dnsmasq/dnsmasq.conf new file mode 100644 index 0000000..e39820b --- /dev/null +++ b/research/netboot/dnsmasq/dnsmasq.conf @@ -0,0 +1,14 @@ +#dnsmasq config, for a complete example, see: +# http://oss.segetech.com/intra/srv/dnsmasq.conf + +port=0 +dhcp-range=192.168.1.0,proxy + +pxe-service=0,"Raspberry Pi Boot" + +enable-tftp + +tftp-root=/tftpboot + +log-dhcp + diff --git a/research/netboot/dnsmasq/kustomization.yaml b/research/netboot/dnsmasq/kustomization.yaml new file mode 100644 index 0000000..9c2d28b --- /dev/null +++ 
b/research/netboot/dnsmasq/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - deployment.yaml diff --git a/research/netboot/docs/README.org b/research/netboot/docs/README.org new file mode 100644 index 0000000..2fd64cd --- /dev/null +++ b/research/netboot/docs/README.org @@ -0,0 +1,57 @@ +#+TITLE: Raspberry Pis @ ii + +* Goals + +With a number of [[https://www.raspberrypi.org/products/raspberry-pi-4-model-b/][Raspberry Pi 4]] we can provide the following: + +- centrally managed workstations for various group activities +- cost effective infrastructure + +* Setup + +The current solution is using a number of Raspberry Pi4 (4GB) as workstations as well as the server. + +** Server + +- Operating System: Ubuntu 19.10 + +#+begin_example +Linux rpi0 5.3.0-1014-raspi2 #16-Ubuntu SMP Tue Nov 26 11:18:23 UTC 2019 aarch64 aarch64 aarch64 GNU/Linux +#+end_example + +*** Key Software + +- [[http://www.thekelleys.org.uk/dnsmasq/doc.html][dnsmasq]]: TFTP server +- [[https://github.com/nfs-ganesha/nfs-ganesha][nfs-ganesha]]: NFS server + +** Workstations + +- Operating System: Raspbian GNU/Linux 10 (buster) + +#+begin_example +Linux rpi-d1 4.19.80-v7l+ #1275 SMP Mon Oct 28 18:37:34 GMT 2019 armv7l GNU/Linux +#+end_example + +*** Key Software + +- [[https://packages.debian.org/buster/nfs-common][nfs-common]]: NFS client +- [[https://packages.debian.org/buster/openssh-server][openssh-server]]: debug clients remotely + +* Network Booting Sequence + +[[./images/pi-diagram1-network-boot-sequence.png]] + +* Support + +Each part of the process will be documented further as outlined below.
+ +- [[pi-server.org][setup Raspberry Pi Server]] +- [[./pi-server.org#tftp-server][install & config TFTP]] +- [[./pi-server.org#nfs-server][install & config NFS]] +- [[pi-client.org][setup Raspberry Pi client]] + +* References + +- [[https://github.com/raspberrypi/rpi-eeprom/blob/master/firmware/raspberry_pi4_network_boot_beta.md][github.com/raspberrypi/rpi-eeprom/blob/master/firmware/raspberry_pi4_network_boot_beta.md]] +- [[https://www.blockdev.io/network-booting-a-raspberry-pi-3/][www.blockdev.io/network-booting-a-raspberry-pi-3]] +- [[https://github.com/Hexxeh/rpi-firmware][github.com/Hexxeh/rpi-firmware]] diff --git a/research/netboot/docs/images/pi-diagram1-network-boot-sequence.plantuml b/research/netboot/docs/images/pi-diagram1-network-boot-sequence.plantuml new file mode 100644 index 0000000..fe936c9 --- /dev/null +++ b/research/netboot/docs/images/pi-diagram1-network-boot-sequence.plantuml @@ -0,0 +1,52 @@ +@startuml +title Network Booting a Pi\n +footer \nDiagram 1 + +skinparam sequenceArrowThickness 2 +skinparam roundcorner 10 + +participant "Pi Client" as PiC #red +participant Network #grey +participant Router #DodgerBlue +participant "Pi Server" as PiS #LimeGreen + +hnote over PiC : Power On + +note left of PiC #aqua + BOOT_ORDER + Checks +end note + +PiC --> Network: Broadcast request for an IP Address +note left of PiC #aqua + DHCP +end note + +Router -> PiC: Provides an IP address +PiS -> PiC: Provides PXE & TFTP service +||30|| + +PiC -> PiS: Request TFTP files + +note right of PiS #aqua + TFTP files via + DNSMASQ +end note + +PiS -> PiC: Receive TFTP files + +note left of PiC: Process\n**cmdline.txt** + +PiC -> PiS: Request NFS files + +note right of PiS #aqua + NFS files via + NFS-Ganesha +end note + +PiS -> PiC: Receive NFS files +||20|| +...~3 minutes later... 
+ +hnote over PiC : Desktop Ready +@enduml diff --git a/research/netboot/docs/images/pi-diagram1-network-boot-sequence.png b/research/netboot/docs/images/pi-diagram1-network-boot-sequence.png new file mode 100644 index 0000000..689943c Binary files /dev/null and b/research/netboot/docs/images/pi-diagram1-network-boot-sequence.png differ diff --git a/research/netboot/docs/images/pi-diagram2-boot-sequence.plantuml b/research/netboot/docs/images/pi-diagram2-boot-sequence.plantuml new file mode 100644 index 0000000..a888bed --- /dev/null +++ b/research/netboot/docs/images/pi-diagram2-boot-sequence.plantuml @@ -0,0 +1,39 @@ +@startuml +title Pi Boot Sequence\n +footer \nDiagram 2 + +skinparam sequenceArrowThickness 2 +skinparam roundcorner 10 + +participant "Pi Client" as PiC #red +participant Network #grey +participant Router #DodgerBlue +participant "Pi Server" as PiS #LimeGreen + +hnote over PiC : Power On + +note left of PiC #aqua + BOOT_ORDER + Checks +end note + +PiC --> Network: Broadcast request for an IP Address +note left of PiC #aqua + DHCP +end note + +Router -> PiC: Provides an IP address +PiS -> PiC: Provides PXE & TFTP service +||30|| + +PiC -> PiS: Request TFTP files + +note right of PiS #aqua + TFTP files via + DNSMASQ +end note + +PiS -> PiC: Receive TFTP files +||20|| +... ... 
+@enduml diff --git a/research/netboot/docs/images/pi-diagram2-boot-sequence.png b/research/netboot/docs/images/pi-diagram2-boot-sequence.png new file mode 100644 index 0000000..b9fc38b Binary files /dev/null and b/research/netboot/docs/images/pi-diagram2-boot-sequence.png differ diff --git a/research/netboot/docs/images/pi-sequence-diagrams.org b/research/netboot/docs/images/pi-sequence-diagrams.org new file mode 100644 index 0000000..3d48c36 --- /dev/null +++ b/research/netboot/docs/images/pi-sequence-diagrams.org @@ -0,0 +1,117 @@ +#+TITLE: Pi Sequence Diagrams +#+AUTHOR: Stephen Heywood + +* Overview + +Diagrams can convey in a simple way a number of key ideas around a focused topic or process. To create or update a diagram one would normally use some type of drawing application. An alternative method is using software that describes the diagram, namely [[https://plantuml.com/sequence-diagram][PlantUML]]. The initial diagrams will use [[https://plantuml.com/sequence-diagram][sequence]] format. + +* Setup + +A server can be run inside a docker container that provides a web page to edit the "diagram" as well as viewing and saving the resulting image. + +#+begin_src shell +docker run --rm -p8080:8080 plantuml/plantuml-server +#+end_src + +For now the content for the diagram will need to be manually synced/tested inside a browser with the PlantUML server. 
+ +* Diagram 1: Network Booting a Pi + +#+begin_src plantuml :eval never :tangle pi-diagram1-network-boot-sequence.plantuml +@startuml +title Network Booting a Pi\n +footer \nDiagram 1 + +skinparam sequenceArrowThickness 2 +skinparam roundcorner 10 + +participant "Pi Client" as PiC #red +participant Network #grey +participant Router #DodgerBlue +participant "Pi Server" as PiS #LimeGreen + +hnote over PiC : Power On + +note left of PiC #aqua + BOOT_ORDER + Checks +end note + +PiC --> Network: Broadcast request for an IP Address +note left of PiC #aqua + DHCP +end note + +Router -> PiC: Provides an IP address +PiS -> PiC: Provides PXE & TFTP service +||30|| + +PiC -> PiS: Request TFTP files + +note right of PiS #aqua + TFTP files via + DNSMASQ +end note + +PiS -> PiC: Receive TFTP files + +note left of PiC: Process\n**cmdline.txt** + +PiC -> PiS: Request NFS files + +note right of PiS #aqua + NFS files via + NFS-Ganesha +end note + +PiS -> PiC: Receive NFS files +||20|| +...~3 minutes later... + +hnote over PiC : Desktop Ready +@enduml + #+end_src + +* Diagram 2: Boot Sequence + +#+begin_src plantuml :eval never :tangle pi-diagram2-boot-sequence.plantuml + @startuml +title Pi Boot Sequence\n +footer \nDiagram 2 + +skinparam sequenceArrowThickness 2 +skinparam roundcorner 10 + +participant "Pi Client" as PiC #red +participant Network #grey +participant Router #DodgerBlue +participant "Pi Server" as PiS #LimeGreen + +hnote over PiC : Power On + +note left of PiC #aqua + BOOT_ORDER + Checks +end note + +PiC --> Network: Broadcast request for an IP Address +note left of PiC #aqua + DHCP +end note + +Router -> PiC: Provides an IP address +PiS -> PiC: Provides PXE & TFTP service +||30|| + +PiC -> PiS: Request TFTP files + +note right of PiS #aqua + TFTP files via + DNSMASQ +end note + +PiS -> PiC: Receive TFTP files +||20|| +... ... 
+@enduml +#+end_src diff --git a/research/netboot/docs/pi-client.org b/research/netboot/docs/pi-client.org new file mode 100644 index 0000000..404d1c5 --- /dev/null +++ b/research/netboot/docs/pi-client.org @@ -0,0 +1,146 @@ +#+TITLE: Pi Client +#+AUTHOR: Stephen Heywood +#+DATE: 11 December, 2019 + + +* Overview + +This document will cover the process of setting up a Raspberry Pi 4 that will boot it's operating system from the network without needing any SD card. + +* Prerequisites + +To make the Raspberry Pi a network bootable system there are some key bits of software that's required. Double check that the Pi is booting the latest version of [[https://www.raspberrypi.org/downloads/raspbian/][Raspbian]]. + +#+begin_src shell :eval never +sudo apt install rpi-eeprom +sudo rpi-eeprom-update +#+end_src + +Current output is + +#+begin_example +BOOTLOADER: up-to-date +CURRENT: Wed Oct 16 17:00:03 UTC 2019 (1571245203) + LATEST: Tue Sep 10 10:41:50 UTC 2019 (1568112110) +VL805: up-to-date +CURRENT: 000137ab + LATEST: 000137ab +#+end_example + +* Initial Boot Process + +The [[https://www.raspberrypi.org/documentation/hardware/raspberrypi/bootmodes/bootflow_2711.md][Raspberry Pi website]] lists in full the details on how a Raspberry Pi 4 boots. The keys points that we are interested in are shown in the diagram below. + +[[./images/pi-diagram2-boot-sequence.png]] + +By checking the ~BOOT_ORDER~ setting the Pi will decide what boot modes it will use and in what order. These boot modes are stored as a 32bit unsigned integer. + +#+begin_example +0x0 NONE +0x1 SD CARD +0x2 NETWORK +#+end_example + +Using the following command will confirm the current boot modes. Within the list of other options, ~BOOT_ORDER=0x1~ is the default configuration to boot via a SD card. + +#+begin_src shell :eval never +vcgencmd bootloader_config +#+end_src + +To setup the Raspberry Pi to boot from the network we will be using the config option ~BOOT_ORDER=0x21~. 
This configuration will cause the Pi to first boot from a SD card if present, if not then to boot from the network. + +** Update ~BOOT_ORDER~ setting + +To write the new ~BOOT_ORDER~ to the EEPROM will require the following steps. + +*** Extract the configuration file + +#+begin_src shell :eval never +cp /lib/firmware/raspberrypi/bootloader/beta/pieeprom-2019-11-18.bin pieeprom.bin +rpi-eeprom-config pieeprom.bin > bootconf.txt +#+end_src + +*** Update and save the configuration file + +- use an text editor to open ~bootconf.txt~ +- locate ~BOOT_ORDER~ and update the line to be ~BOOT_ORDER=0x21~ +- save the file + +*** Update EEPROM image with new configuration + +#+begin_src shell :eval never +rpi-eeprom-config --out pieeprom-netboot.bin --config bootconf.txt pieeprom.bin +#+end_src + +*** Flash the EEPROM with the new image + +#+begin_src shell :eval never +sudo rpi-eeprom-update -d -f ./pieeprom-netboot.bin +sudo reboot +#+end_src + +*** Recheck ~BOOT_ORDER~ setting + +#+begin_src shell :eval never +vcgencmd bootloader_config +#+end_src + +Make sure that the output contains ~BOOT_ORDER=0x21~ now. Details about the other configuration options are on the [[https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2711_bootloader_config.md][Pi4 Bootloader Configuration]] web page. +* Image Client +** Prep Image + +Before we image the client Pi we need to update a few local settings. + +*** Check Kernel version + +Make sure that the kernel version used by the Pi is in sync with the kernel version that will be served from the [[https://github.com/Hexxeh/rpi-firmware][/tftpboot]] folder. +If the kernel versions get out of sync it will cause issues with the keyboard and mouse. Further details are logged in this [[https://gitlab.ii.coop/ii/infra/ii-pi/issues/16][issue]]. 
+ +#+begin_src shell :eval never + uname -a +#+end_src + +#+begin_example +Linux rpi-40 4.19.80-v7l+ #1275 SMP Mon Oct 28 18:37:34 GMT 2019 armv7l GNU/Linux +#+end_example + +*** Enable SSH + +Use ~raspi-config~ to enable the local SSH server. This will help with any remote debugging of the client. Locate ~SSH~ inside of the /Interfacing Options/ + +*** Disable Swapfile + +As the client will be booting from the server we need to disable the swapfile + +#+begin_src shell :eval never +sudo dphys-swapfile swapoff +sudo dphys-swapfile uninstall +sudo systemctl stop dphys-swapfile +sudo systemctl disable dphys-swapfile +#+end_src + +** Clone the file system + +Let's create a copy of the core file system so that we can then make a /tar/ file. The /tar/ file will be copied to the server. + + #+begin_src shell :eval never + sudo mkdir -p /nfs/client1 + sudo rsync -xa --progress --exlude /nfs --exlude /dev --exclude /sys --exclude /tmp / /nfs + sudo tar -cpf /nfs-client1.tar /nfs + #+end_src + +** Copy compressed file to server + +Make sure that the key steps in the server documentation have been completed first. Also make the folder ~/nfs/tmp~ as well. + + #+begin_src shell :eval never + scp /nfs-client1.tar ubuntu@192.168.1.200:/nfs/tmp + #+end_src + +** Extract the file system + +On the server we will extract the file system for the client to ~/nfs/client1~ + + #+begin_src shell :eval never + sudo tar --same-owner -xvf nfs-client1.tar -C /nfs + #+end_src diff --git a/research/netboot/docs/pi-server.org b/research/netboot/docs/pi-server.org new file mode 100644 index 0000000..1c138e2 --- /dev/null +++ b/research/netboot/docs/pi-server.org @@ -0,0 +1,478 @@ +#+TITLE: Pi Server +#+AUTHOR: Stephen Heywood +#+DATE: 12 December, 2019 + + +* Overview + +This document will cover the process of setting up a Raspberry Pi 4 as a network server. The server will provide support for a number of Raspberry Pi clients, these clients will PXE boot from the server. 
+ +The documentation uses the ~192.168.1.0/24~ network as a reference. Please update the setting to match your current requirements. + +* Key Software +** Operating System & Kernel + + #+begin_src shell :eval never + lsb_release -irc + #+end_src + + #+begin_example + Distributor ID: Ubuntu + Release: 19.10 + Codename: eoan + #+end_example + + #+begin_src shell :eval never + ubuntu@rpi0:~$ uname -a + #+end_src + + #+begin_example + Linux rpi0 5.3.0-1014-raspi2 #16-Ubuntu SMP Tue Nov 26 11:18:23 UTC 2019 aarch64 aarch64 aarch64 GNU/Linux + #+end_example + +** Network Services + +| *Software* | *Version* | *Role* | |------------------+-----------+-------------| | ~dnsmasq~ | 2.80 | TFTP Server | | ~nfs-ganesha~ | 2.7.6-1 | NFS Server | | ~openssh-server~ | 8.0p1 | SSH Server | + +* Installation +** Install Image + +Check the Ubuntu web site for details about the latest [[https://ubuntu.com/download/raspberry-pi][Raspberry Pi image]], the server will be using the [[http://cdimage.ubuntu.com/releases/19.10.1/release/ubuntu-19.10.1-preinstalled-server-arm64+raspi3.img.xz][64bit image]]. Make sure that the downloaded version is for ~arm64~. +Follow the instructions on the website for transferring the image to an SD card based on your operating system. +A simple way to transfer the image to the SD card is with [[https://www.balena.io/etcher/][Etcher]]. + +** First Boot + +When booting the Raspberry Pi with the new SD card make sure that everything has finished initialising before trying to login to the server. If not, this can cause issues when trying to login with the credentials listed below. + +- User ID: ~ubuntu~ +- Password: ~ubuntu~ + +You will be required to change the default password when logging in for the first time. + +** Hostname + +The default hostname for the image is /ubuntu/. The following will let you rename the server to something that fits your environment more. +After updating and saving changes the server will need a reboot.
+ +#+begin_src shell :eval never +sudo vi /etc/hostname +sudo reboot +#+end_src + +* Networking + +There are two options for setting the IP address for the server, either static or dynamic addressing. This project will be using dynamic addressing that will /reserve/ a dynamic address from the DHCP server. + +Use the following command to locate the MAC address of the Pi + +#+begin_src shell :eval never +ip a +#+end_src + +The address will be listed after ~link/ether~ + +#+begin_example +2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 + link/ether dc:a6:32:1c:2b:8a brd ff:ff:ff:ff:ff:ff +#+end_example + +Your DHCP server will most likely be running on your local router. The manual for the router should explain how to /reserve/ or /bind/ an IP address to a MAC address. +Confirm that the address has been reserved on the DHCP server by rebooting the server. Using the MAC address of this Pi as a reference we reserved the IP address ~192.168.1.200~. + +#+begin_src shell :eval never +sudo reboot +#+end_src + +* TFTP Server +** Installation + +To install the TFTP Server run the following command. + + #+begin_src shell :eval never +sudo apt install dnsmasq + #+end_src + +Note that the ~dnsmasq.service~ will fail to start on after the installation has completed due to ~dnsmasq~ conflicting with ~systemd-resolv~ which will be addressed in the next section. + +** Configuration + +Let's add some settings to ~dnsmasq.conf~ that support our project. + + #+begin_src shell :eval never + sudo vi /etc/dnsmasq.conf + #+end_src + +Add the following to the start of the file. You can find each item commented out at some point within the file if you prefer to do it that way. + + #+begin_example + port=0 + dhcp-range=192.168.1.0,proxy + pxe-service=0,"Raspberry Pi Boot" + enable-tftp + tftp-root=/tftpboot + #+end_example + +We need to setup the TFTP folder with the files that will start the boot process. 
+ + #+begin_src shell :eval never + cd ~ + git clone --depth 1 https://github.com/Hexxeh/rpi-firmware.git + sudo mv rpi-firmware /tftpboot + #+end_src + +Next, lets check the current status of ~dnsmasq~ before starting it. Then check that the service has start without any errors. If there are any problems then recheck ~/etc/dnsmasq.conf~ before restarting ~dnsmasq~. For more help review the logs from ~journalctl -xe~. + + #+begin_example + sudo systemctl status dnsmasq + sudo systemctl start dnsmasq + sudo systemctl status dnsmasq + #+end_example + +To check that the server is listening for TFTP clients run the following command. + +#+begin_src shell :eval never +sudo ss -ulp +#+end_src + +Which should give a result similar to the following + +#+begin_example +State Recv-Q Send-Q Local Address:Port Peer Address:Port +UNCONN 0 0 127.0.0.53%lo:domain 0.0.0.0:* users:(("systemd-resolve",pid=1151,fd=12)) +UNCONN 0 0 192.168.1.104%eth0:bootpc 0.0.0.0:* users:(("systemd-network",pid=1108,fd=15)) +UNCONN 0 0 0.0.0.0:tftp 0.0.0.0:* users:(("dnsmasq",pid=2307,fd=4)) +UNCONN 0 0 [::]:tftp [::]:* users:(("dnsmasq",pid=2307,fd=5)) +#+end_example + +** Contents + +~/tftpboot~ will hold the kernel and other supporting files for the Pi to complete it's initial boot. Core operating system files and the desktop will load via the NFS server. + +*** Per Pi Config + +To load a configuration for each Pi on the network we need to create a directory under ~/tftpboot~ from the Pi's serial number, which is the last 8 characters from the output of the command below. + +#+begin_src shell :eval never +grep Serial /proc/cpuinfo +#+end_src + +~start4.elf~ is the first file that the client will want to download from the server. It will test to see if this file can be found in /serial/ sub folder. +Unless it's found all files will be searched from the root ~/tftpboot~ folder. 
+ +| *File*                      | *Purpose*                                       | +|-----------------------------+-------------------------------------------------| +| ~start4.elf~                | Firmware file specific to Pi 4                  | +| ~config.txt~                | Raspberry Pi configuration file                 | +| ~fixup4.dat~                | Linker file, matches ~start4.elf~               | +| ~bcm2711-rpi-4-b.dtb~       | Hardware definitions                            | +| ~overlay/vc4-fkms-v3d.dtbo~ | Hardware definitions referenced in ~config.txt~ | +| ~cmdline.txt~               | Read for kernel command line string             | +| ~kernel7l.img~              | Default kernel for the Pi 4                     | + +The best way to manage each serial Pi folder is to symlink all files other than ~config.txt~ and ~cmdline.txt~ to the master file in either ~/tftpboot~ or ~/tftpboot/overlay~. + +*** ~config.txt~ + +This file can be found in the Raspbian ~/boot~ folder. Depending on the monitor connected to the Pi client it may need to be updated. + +*** ~cmdline.txt~ + +This file provides the kernel with settings that enable it to boot the core operating system files which in turn will load the desktop for the end user. + +#+begin_src text +otg.lpm_enable=0 console=serial0,115200 console=tty1 root=/dev/nfs nfsroot=192.168.1.200:/nfs/client6,udp,nfsvers=3 rw ip=dhcp rootwait elevator=deadline +#+end_src + +There are a number of settings that are all linked to the configuration of the Pi server + +- ~root=/dev/nfs~ defines the location of the root filesystem. This is a /pseudo-NFS-device/ that tells the kernel to use NFS instead of a real device. +- ~nfsroot~ defines the root of the NFS share, which is the IP of the Pi server (~192.168.1.200~ in this example) followed by the root directory, which is ~/nfs/client6~ in this example + +The last part of the ~nfsroot~ is for various NFS options + +* NFS Server + +The current connection between the Pi client and server is NFSv3 using UID/GID. This will be reviewed when the file permissions for NFSv4, which uses ~idmapd.conf~, can be resolved. 
+ +#+begin_src shell :eval never +sudo apt install nfs-ganesha +#+end_src + +Each Pi client will need an IP address reserved on the network by the DHCP server so that it can be linked to the NFS folder on the server. +Update the ~ganesha.conf~ to match your requirements. Note: The NFS server will have issues if all NFS paths arn't on the server. + +Copy ~ganesha.conf~ to the server folder ~/etc/ganesha/~ + +#+begin_src shell text :tangle ./conf/ganesha.conf +EXPORT +{ + Export_Id = 101; + Path = /nfs/client1; + Pseudo = /nfs/client1; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.101; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +EXPORT +{ + Export_Id = 102; + Path = /nfs/client2; + Pseudo = /nfs/client2; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.102; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +EXPORT +{ + Export_Id = 103; + Path = /nfs/client3; + Pseudo = /nfs/client3; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.103; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +EXPORT +{ + Export_Id = 104; + Path = /nfs/client4; + Pseudo = /nfs/client4; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.104; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +EXPORT +{ + Export_Id = 105; + Path = /nfs/client5; + Pseudo = /nfs/client5; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.105; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +EXPORT +{ + Export_Id = 106; + Path = /nfs/client6; + Pseudo = /nfs/client6; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.106; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +EXPORT +{ + Export_Id = 107; + Path = /nfs/client7; + Pseudo = 
/nfs/client7; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.107; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +EXPORT +{ + Export_Id = 108; + Path = /nfs/client8; + Pseudo = /nfs/client8; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.108; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +EXPORT +{ + Export_Id = 109; + Path = /nfs/client9; + Pseudo = /nfs/client9; + Access_Type = RW; + Protocols = "3"; + + FSAL { + Name = VFS; + } + + CLIENT { + Clients = 192.168.1.109; + Access_Type = "RW"; + Squash = No_Root_Squash; + } +} + +LOG +{ + COMPONENTS { + EXPORT = INFO; + } +} +#+end_src + +* Storage + +The Pi server is using /ext4/ for the root filesystem. The Pi client folders will be via an external drive using /ZFS/. This will give us the ability to /snapshot/ each Pi as required. Also, these /snapshots/ will provide fast deployment and/or recovery points. + +| *Filesystem* | *Type* | Mounted on | +|------------------+--------+------------| +| ~/dev/mmcblk0p2~ | ext4 | / | +| ~nfs~ | zfs | /nfs | + +** Installation + +Please note that the CPU load for installing the ZFS packages is high and can cause the Pi to overheat if it doesn't have suitable cooling. +Also, the build time for the kernel modules will take some time. While this is happing we will use ~htop~ to monitor the load and processes running on the Pi. + +#+begin_src shell :eval never +tmux +htop +#+end_src + +To split the tmux pane use /Ctl-B/ " + +#+begin_src shell :eval never +sudo apt install zfsutils-linux zfs-dkms +#+end_src + +After the installation reboot the server and check that the kernel modules for ZFS are all loaded. + +#+begin_src shell :eval never +lsmod | grep zfs +#+end_src + +** Create ZFS Storage + +*Warning: This section will destroy any data on the external drive. 
Please make sure you have a tested backup of any data that is on the drive before starting this section.* + +The size of the USB external drive will determine how much data we can store. Initial setup of six Pi clients requires ~50GB. +Depending on the external USB drive you connect to the Pi, the "labels" for this drive can vary a lot. + +#+begin_src shell :eval never +lsblk +ls -l /dev/disk/by-id/ +#+end_src + +Most likely, the drive is ~sda~ in the first output; we need to find the ~by-id~ for this drive in the second output listing. + +Replace the last part of the following command to match your drive. ~/nfs~ will be the folder on the server which will be referenced by ZFS as ~nfs~ for the storage pool name. + +#+begin_src shell :eval never +sudo zpool create -f -m /nfs nfs scsi-SStoreJet__61006325770FB5262136 +#+end_src + +Check that the ZFS pool has been created with the following + +#+begin_src shell :eval never +zpool list +#+end_src + +The /FREE/ column should be close to the size of the external drive. + +** Snapshot client folder + +Let's create a snapshot before we create extra client connection points + +#+begin_src shell :eval never +sudo zfs snapshot nfs/client1@now +#+end_src + +** Create extra clients + +Repeat the following for as many clients as required. + + #+begin_src shell :eval never + sudo zfs clone nfs/client1@now nfs/client2 + #+end_src + +* Update client /etc + +We need to update the hostname for each client so the DHCP server will register the name for every Pi. 
+ +#+begin_src shell :eval never +sudo vi /nfs/client2/etc/hostname +#+end_src + +Update the hostname reference with the IP 127.0.1.1 + +#+begin_src shell :eval never +sudo vi /nfs/client2/etc/hosts +#+end_src diff --git a/research/netboot/kind-cluster-config.yaml b/research/netboot/kind-cluster-config.yaml new file mode 100644 index 0000000..e846eb8 --- /dev/null +++ b/research/netboot/kind-cluster-config.yaml @@ -0,0 +1,61 @@ +# kind-cluster-config.yaml +# #+NAME: kind kubeadm DynamicAuditing configuration + +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +kubeadmConfigPatches: +- | + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + metadata: + name: config + apiServer: + extraArgs: + "feature-gates": "DynamicAuditing=true" + "runtime-config": "auditregistration.k8s.io/v1alpha1=true" + "audit-dynamic-configuration": "true" +kubeadmConfigPatchesJSON6902: +- group: kubeadm.k8s.io + version: v1beta2 + kind: ClusterConfiguration + patch: | + - op: add + path: /apiServer/certSANs/- + value: '*.kubemacs.org' + - op: add + path: /apiServer/certSANs/- + value: '*.ii.nz' + - op: add + path: /apiServer/certSANs/- + value: '*.ii.coop' + - op: add + path: /apiServer/certSANs/- + value: '*.sharing.io' +nodes: + - role: control-plane + extraMounts: + - containerPath: /var/run/docker.sock + hostPath: /var/run/docker.sock + readOnly: False + - containerPath: /var/host/tmp + hostPath: /tmp + readOnly: False + extraPortMappings: + - containerPort: 6443 + hostPort: 6443 + - containerPort: 67 + hostPort: 67 + listenAddress: "192.168.1.99" + protocol: UDP + - containerPort: 69 + hostPort: 69 + listenAddress: "192.168.1.99" + protocol: UDP + kubeadmConfigPatches: + - | + apiVersion: kubeadm.k8s.io/v1beta2 + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + authorization-mode: "AlwaysAllow" diff --git a/research/netboot/kustomization.yaml b/research/netboot/kustomization.yaml new file mode 100644 index 
0000000..942528b --- /dev/null +++ b/research/netboot/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: + - dnsmasq diff --git a/research/netboot/setup.sh b/research/netboot/setup.sh new file mode 100644 index 0000000..2d2bf08 --- /dev/null +++ b/research/netboot/setup.sh @@ -0,0 +1,31 @@ +# Make sure current kubemacs has been downloaded +docker pull gcr.io/apisnoop/kubemacs:0.9.33 +# Build the dnsmasq container locally for now +docker build -t ii/dnsmasq ./dnsmasq + +# Run with EMAIL=me@my.net NAME="First Last" bash setup.sh +NAME=${NAME:-"Hippie Hacker"} +EMAIL=${EMAIL:-"hh@ii.coop"} +KIND_IMAGE="kindest/node:v1.17.0@sha256:9512edae126da271b66b990b6fff768fbb7cd786c7d39e86bdf55906352fdf62" +KIND_CONFIG="kind-cluster-config.yaml" +K8S_RESOURCES="k8s-resources.yaml" +DEFAULT_NS="ii" + +kind create cluster --config $KIND_CONFIG --image $KIND_IMAGE + +kind load docker-image --nodes kind-control-plane ii/dnsmasq +kind load docker-image --nodes kind-control-plane gcr.io/apisnoop/kubemacs:0.9.33 + +kubectl create ns $DEFAULT_NS +kubectl config set-context $(kubectl config current-context) --namespace=$DEFAULT_NS +kubectl apply -f ./.kubemacs + +echo "Waiting for Kubemacs StatefulSet to have 1 ready Replica..." +while [ "$(kubectl get statefulset kubemacs -o json | jq .status.readyReplicas)" != 1 ]; do + sleep 1s +done + +sleep 5 +kubectl wait --for=condition=Ready pod/kubemacs-0 +kubectl wait --for=condition=Ready pod/kubemacs-0 +kubectl exec -ti kubemacs-0 -- attach diff --git a/research/netboot/tilt.org b/research/netboot/tilt.org new file mode 100644 index 0000000..87580af --- /dev/null +++ b/research/netboot/tilt.org @@ -0,0 +1,15 @@ +# -*- ii: y; -*- +#+TITLE: tilt.localho.st config + +* Bring up tilt +#+begin_src tmate :dir "." 
:session ii:tilt + tilt up --host 0.0.0.0 +#+end_src +* These files/folders map to these *.localho.st sites on 127.0.0.1 +** http://tilt.localho.st +Our [[file:Tiltfile::k8s_yaml(kustomize('.'))][./Tiltfile]] uses the [[file:kustomization.yaml::kind:%20Kustomization][./kustomization.yaml]] to figure out what resources to +deploy. Changes to any file referenced will result in immediate changes to the +deployed resources. If [[file:Tiltfile::docker_build(][docker_build()]] entries are uncommented, those images will +be rebuilt, pushed, and pods restarted automatically. +* Visit these sites +- http://tilt.localho.st diff --git a/research/org-block-postgresql.org b/research/org-block-postgresql.org new file mode 100644 index 0000000..c6fd91e --- /dev/null +++ b/research/org-block-postgresql.org @@ -0,0 +1,301 @@ +#+TITLE: BOO +* Footnotes +** sql-product +#+begin_src elisp +(symbol-value 'sql-product) +#+end_src + +#+RESULTS: +#+begin_src elisp +ansi +#+end_src + +** sql-connection-alist +#+begin_src elisp +(symbol-value 'sql-connection-alist) +#+end_src + +#+RESULTS: +#+begin_src elisp +nil +#+end_src + +** org-babel-default-header-args:sql-mode +#+begin_src elisp +(symbol-value 'org-babel-default-header-args:sql-mode) +#+end_src + +#+RESULTS: +#+begin_src elisp +((:results . "replace code") + (:product . "postgres") + (:wrap . "SRC example")) +#+end_src + +** org-babel-header-args:sql-mode +#+begin_src elisp +(symbol-value 'org-babel-header-args:sql-mode) +#+end_src + +#+RESULTS: +#+begin_src elisp +((:product . :any) + (:session . 
:any)) +#+end_src + +** org-file-properties +We've set this one in the past to a alist of +#+begin_src elisp + (set (make-local-variable 'org-file-properties) + (list + (cons 'header-args:sql-mode + (concat + ":noweb yes" + " :noweb-ref " item-str + " :comments org" + " :eval never-export" + " :results code" + " :product postgres" + " :session data" + ;; " :session (symbol-value user-login-name)" + ;; " :session (concat user-login-name \":\" " "main" ")" + ;; " :session (concat user-login-name \":\" " item-str ")" + " :exports both" + )) + ) +#+end_src + +#+RESULTS: +#+begin_src elisp +((:product . :any) + (:session . :any)) +#+end_src +* Deploying postgres +** Secrets +#+name: postgres-secret +#+begin_src yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres-configuration +stringData: + POSTGRES_DB: postgres + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + PGDATABASE: postgres + PGUSER: postgres +#+end_src +** Deployment +#+name: postgres-deployment +#+begin_src yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres +spec: + replicas: 1 + serviceName: "postgres" + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + restartPolicy: Always + containers: + - name: postgres + image: docker.io/postgres:12.2-alpine + securityContext: + readOnlyRootFilesystem: true + runAsUser: 70 + runAsGroup: 70 + allowPrivilegeEscalation: false + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + - name: var-run-postgresql + mountPath: /var/run/postgresql + - name: tmp + mountPath: /tmp + ports: + - containerPort: 5432 + livenessProbe: + exec: + command: + - "sh" + - "-c" + - "pg_isready" + - "-U" + - "$POSTGRES_USER" + failureThreshold: 5 + periodSeconds: 10 + timeoutSeconds: 5 + env: + - name: POSTGRES_DB + 
valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_DB + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-configuration + key: POSTGRES_PASSWORD + - name: PGDATABASE + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGDATABASE + - name: PGUSER + valueFrom: + secretKeyRef: + name: postgres-configuration + key: PGUSER + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + initContainers: + - name: postgres-db-permissions-fix + image: alpine:3.12 + command: + - /bin/sh + - -c + - "/bin/chown -R 70:70 /var/lib/postgresql/data" + volumeMounts: + - name: var-lib-postgresql + mountPath: /var/lib/postgresql/data + volumes: + - name: var-lib-postgresql + persistentVolumeClaim: + claimName: postgres-pvc + - name: var-run-postgresql + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - name: "5432" + port: 5432 + targetPort: 5432 +#+end_src +** Deploying Postgres +#+begin_src shell :noweb yes +kubectl apply -f - << EOF +<> +--- +<> +EOF +#+end_src + +#+RESULTS: +#+begin_example +secret/postgres-configuration created +persistentvolumeclaim/postgres-pvc created +statefulset.apps/postgres created +service/postgres created +#+end_example + +* Setting local vars +** Defaults for sql-mode blocks +#+begin_src elisp :result silent +(set (make-local-variable 'org-babel-default-header-args:sql-mode) + ;; Set up all sql-mode blocks to be postgres and literate + '((:results . "replace code") + (:product . "postgres") + (:session . "none") + (:noweb . "yes") + (:comments . "no") + (:wrap . "SRC example"))) +#+end_src + +#+RESULTS: +#+begin_src elisp +((:results . "replace code") + (:product . "postgres") + (:session . "none") + (:noweb . "yes") + (:comments . "no") + (:wrap . 
"SRC example")) +#+end_src + +** Deault for connecting to sql-mode +#+begin_src elisp :results silent +(set (make-local-variable 'sql-server) "postgres") +(set (make-local-variable 'sql-port) 5432) +(set (make-local-variable 'sql-user) "postgres") +(set (make-local-variable 'sql-database) "postgres") +(set (make-local-variable 'sql-product) '(quote postgres)) +#+end_src + +** Deault for creating new sql-mode connections +#+begin_src elisp :results silent +(set (make-local-variable 'sql-connection-alist) + (list + ;; setting these allows for the connection to be + ;; created on the fly + (list 'none + (list 'sql-product '(quote postgres)) + (list 'sql-user sql-user) + (list 'sql-database sql-database) + (list 'sql-port sql-port) + (list 'sql-server sql-server)))) +#+end_src +* Verify things are up +** Pod +#+begin_src shell +kubectl get pod -l app=postgres +#+end_src + +#+RESULTS: +#+begin_example +NAME READY STATUS RESTARTS AGE +postgres-0 1/1 Running 0 47s +#+end_example +** Service +#+begin_src shell +kubectl get service postgres +#+end_src + +#+RESULTS: +#+begin_example +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +postgres ClusterIP 10.96.151.12 5432/TCP 74s +#+end_example + +* SQL +#+begin_src sql-mode +select 1; +#+end_src + +#+RESULTS: +#+begin_SRC example + ?column? +---------- + 1 +(1 row) + +#+end_SRC diff --git a/research/pik8s.org b/research/pik8s.org new file mode 100644 index 0000000..3669372 --- /dev/null +++ b/research/pik8s.org @@ -0,0 +1,4631 @@ +#+PROPERTY: header-args:shell :dir /ssh:ubuntu@192.168.1.101: +#+PROPERTY: header-args:shell+ :results code +#+PROPERTY: header-args:shell+ :prologue "(\n" +#+PROPERTY: header-args:shell+ :epilogue ") 2>&1\n:\n" +#+PROPERTY: header-args:shell+ :wrap EXAMPLE +* Start Over +** allover + #+name: start over + #+begin_src shell + <> + <> + <> + sudo rm -rf /etc/cni/net.d + #+end_src + + #+RESULTS: start over + #+begin_EXAMPLE + [reset] Reading configuration from the cluster... 
+ [reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' + [preflight] Running pre-flight checks + [reset] Removing info for node "ubuntu" from the ConfigMap "kubeadm-config" in the "kube-system" Namespace + W0102 08:12:00.167924 28338 removeetcdmember.go:61] [reset] failed to remove etcd member: error syncing endpoints with etc: etcdclient: no available endpoints + .Please manually remove this etcd member using etcdctl + [reset] Stopping the kubelet service + [reset] Unmounting mounted directories in "/var/lib/kubelet" + [reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki] + [reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf] + [reset] Deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni] + + The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d + + The reset process does not reset or clean up iptables rules or IPVS tables. + If you wish to reset iptables, you must do so manually by using the "iptables" command. + + If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar) + to reset your system's IPVS tables. + + The reset process does not clean your kubeconfig files and you must remove them manually. + Please, check the contents of the $HOME/.kube/config file. + No command with matching syntax recognised. Run 'vgremove --help' for more information. + Correct command syntax is: + vgremove VG|Tag|Select ... + + Volume group "sdb" not found + Cannot process volume group sdb + Device /dev/sdb not found. 
+ #+end_EXAMPLE + +** kubeadm reset + #+name: kubeadm reset + #+begin_src shell :async t + sudo kubeadm reset -f + #+end_src + + #+RESULTS: kubeadm reset + #+begin_EXAMPLE + [preflight] Running pre-flight checks + W0101 07:54:26.429924 1862 removeetcdmember.go:79] [reset] No kubeadm config, using etcd pod spec to get data directory + [reset] No etcd config found. Assuming external etcd + [reset] Please, manually reset etcd to prevent further issues + [reset] Stopping the kubelet service + [reset] Unmounting mounted directories in "/var/lib/kubelet" + [reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki] + [reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf] + [reset] Deleting contents of stateful directories: [/var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni] + + The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d + + The reset process does not reset or clean up iptables rules or IPVS tables. + If you wish to reset iptables, you must do so manually by using the "iptables" command. + + If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar) + to reset your system's IPVS tables. + + The reset process does not clean your kubeconfig files and you must remove them manually. + Please, check the contents of the $HOME/.kube/config file. 
+ #+end_EXAMPLE + +** lvm reset + #+NAME: lvm reset + #+begin_src shell + VG=$(sudo lvs | tail -1 | awk '{print $2}') + sudo lvm vgremove $VG --force --force + sudo lvm lvremove /dev/sdb --force --force + sudo pvremove /dev/sdb + #+end_src + +** rook reset + #+NAME: rook reset + #+begin_src shell + sudo rm -rf /var/lib/rook/ + #+end_src +** uninstall rook + #+begin_src shell + helm uninstall rook-ceph --namespace rook-ceph + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + release "rook-ceph" uninstalled + #+end_EXAMPLE + +** uninstall traefik + #+begin_src shell + helm uninstall ii-traefik --namespace traefik + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + manifest-2 + + release "ii-traefik" uninstalled + #+end_EXAMPLE + +* Configuration +** kubeadm-config.yaml + #+NAME: kubeadm-config.yaml + #+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:kubeadm-config.yaml :noweb yes + apiVersion: kubeadm.k8s.io/v1beta1 + kind: InitConfiguration + localAPIEndpoint: + advertiseAddress: "192.168.1.101" + nodeRegistration: + taints: [] # defaults to NoSchedule on role=master + --- + apiVersion: kubeadm.k8s.io/v1beta1 + kind: ClusterConfiguration + kubernetesVersion: v1.17.0 + controlPlaneEndpoint: "" + networking: + podSubnet: "10.244.0.0/16" + serviceSubnet: "10.96.0.0/12" + apiServer: + extraArgs: + service-node-port-range: "1-60000" # allow more ports via API + --- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + nodePortAddresses: + - "192.168.1.0/24" # default is null + portRange: "1-60000" # Proxy also needs port range to ensure we can use 22,80,443,and friends + #+END_SRC +** rook-config.yaml + #+name: rook-config.yaml + #+begin_src yaml :tangle /ssh:ubuntu@192.168.1.101:rook.yaml :noweb yes + image: + prefix: rook + repository: rook/ceph + tag: master + pullPolicy: IfNotPresent + + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 200m + memory: 512Mi + + rbacEnable: true + pspEnable: true + #+end_src +** rook-cluster.yaml + 
#+begin_src yaml :tangle /ssh:ubuntu@192.168.1.101:rook-cluster.yaml :noweb yes + apiVersion: ceph.rook.io/v1 + kind: CephCluster + metadata: + name: rook-ceph + namespace: rook-ceph + spec: + cephVersion: + image: ceph/ceph:v14.2.5 + allowUnsupported: false + dataDirHostPath: /var/lib/rook + mon: + count: 1 + allowMultiplePerNode: false + dashboard: + enabled: true + ssl: false + monitoring: + enabled: false # requires Prometheus to be pre-installed + rulesNamespace: rook-ceph + network: + hostNetwork: false + storage: + useAllNodes: true + useAllDevices: false + deviceFilter: "^sd" + #+end_src +** ceph-block-pool.yaml + #+begin_src yaml :tangle /ssh:ubuntu@192.168.1.101:ceph-block-pool.yaml :noweb yes + apiVersion: ceph.rook.io/v1 + kind: CephBlockPool + metadata: + name: ii-block-pool + namespace: rook-ceph + spec: + replicated: + size: 1 + #+end_src +** storage-class.yaml + #+begin_src yaml :tangle /ssh:ubuntu@192.168.1.101:storage-class.yaml :noweb yes + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: standard + #provisioner: rook-ceph.cephfs.csi.ceph.com + provisioner: rook-ceph.rbd.csi.ceph.com + parameters: + # clusterID is the namespace where operator is deployed. + clusterID: rook-ceph + + # CephFS filesystem name into which the volume shall be created + # fsName: iifs + + # Ceph pool into which the volume shall be created + # Required for provisionVolume: "true" + pool: ii-block-pool + # RBD image format. Defaults to "2". + imageFormat: "2" + # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. + imageFeatures: layering + + # Root path of an existing CephFS volume + # Required for provisionVolume: "false" + # rootPath: /absolute/path + + # The secrets contain Ceph admin credentials. These are generated automatically by the operator + # in the same namespace as the cluster. 
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + csi.storage.k8s.io/fstype: ext4 + # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) + # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse + # or by setting the default mounter explicitly via --volumemounter command-line argument. + # mounter: kernel + reclaimPolicy: Retain + # reclaimPolicy: Delete + mountOptions: + # uncomment the following line for debugging + #- debug + # uncomment the following to use rbd-nbd as mounter on supported nodes + #mounter: rbd-nbd + #+end_src +** rook-tools.yaml + #+begin_src yaml :tangle /ssh:ubuntu@192.168.1.101:rook-tools.yaml :noweb yes + apiVersion: apps/v1 + kind: Deployment + metadata: + name: rook-ceph-tools + namespace: rook-ceph + labels: + app: rook-ceph-tools + spec: + replicas: 1 + selector: + matchLabels: + app: rook-ceph-tools + template: + metadata: + labels: + app: rook-ceph-tools + spec: + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: rook-ceph-tools + image: rook/ceph:v1.2.0 + command: ["/tini"] + args: ["-g", "--", "/usr/local/bin/toolbox.sh"] + imagePullPolicy: IfNotPresent + env: + - name: ROOK_ADMIN_SECRET + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: admin-secret + securityContext: + privileged: true + volumeMounts: + - mountPath: /dev + name: dev + - mountPath: /sys/bus + name: sysbus + - mountPath: /lib/modules + name: libmodules + - name: mon-endpoint-volume + mountPath: /etc/rook + # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021 + hostNetwork: true + volumes: + - name: dev + hostPath: + path: /dev + - name: sysbus + hostPath: + path: /sys/bus + - name: libmodules + hostPath: + path: /lib/modules + - 
name: mon-endpoint-volume + configMap: + name: rook-ceph-mon-endpoints + items: + - key: data + path: mon-endpoints + #+end_src +** traefik-1.7-config.yaml +If you use this annotation on the PVC, it will skip deleting the resource on uninstall. + +#+name: keepers +#+begin_src yaml +helm.sh/resource-policy: "keep" +#+end_src + +Password injection has been a bit of a pain, for some reason it shows up twice. +I'd like to figure out why it ejects a newline: + +"$apr$PASSWORD +" +And why that newline results in repeating the yaml lines when used as a noweb executable argument. + +#+NAME: traefik-admin-password +#+BEGIN_SRC shell :results silent :dir "." :results value :epilogue "" :prologue "" +# . .traefik.env +# echo -n $TRAEFIK_ADMIN_PASS | htpasswd -i -n '' | sed s/^:// | head -1 +# htpasswd -n -b '' iiadmin | sed s/^:// | head -1 +htpasswd -n -b root iiroot | sed s/^root:// | head -1 +#+END_SRC +#+begin_src emacs-lisp :results value +#+end_src +#+NAME: traefik.yaml helm values +#+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:traefik-1.7-config.yaml :noweb yes + deployment: + hostPort: + httpEnabled: true + httpsEnabled: true + dashboardEnabled: true + httpPort: 80 + httpsPort: 443 + dashboardPort: 8080 + # labels to add to the deployment + labels: + dep-label: ii + annotations: + dep-anno: ii + # labels to add to the pod container metadata + podLabels: + pod-label: ii + podAnnotations: + pod-anno: ii + service: + ## Further config for service of type NodePort + ## Default config with empty string "" will assign a dynamic + ## nodePort to http and https ports + # nodePorts: + # http: "80" + # https: "443" + # serviceType: NodePort + annotations: + service-anno: ii + labels: + service-label: ii + #loadBalancerIP: 192.168.1.101 + # kubernetes.io/ingress.class=traefik + # ingressClass = "traefik-internal" + # https://docs.traefik.io/configuration/backends/kubernetes/#ingressendpoint + # testuser: $apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11 + dashboard: + enabled: true + 
domain: traefik.ii.nz + auth: + basic: + admin: $apr1$We5npcg/$Z1rVvxv82ZFQ97aEwyj0k0 + testuser: $apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11 + root: $apr1$ILfACRlz$L2X6Sfxnrkg90OIblA3t5. + ssl: + enabled: true + enforced: true + permanentRedirect: true + # service: + # annotations: + # labels: + rbac: + enabled: true + accessLogs: + enabled: true + format: json + fields: + defaultMode: keep + # kubernetes: + # ingressEndpoint: + # ip: 192.168.1.101 + # namespaces: [] # all namespaces with empty array + # namespaces: + # - apisnoop + # - default + # - kube-system + acme: + enabled: true + email: hh@ii.coop + staging: false + # challengeType: tls-sni-01 + # challengeType: http-01 + # Unable to obtain ACME certificate for domains \"hh-hasura.apisnoop.io\" + # detected thanks to rule \"Host:hh-hasura.apisnoop.io\" : + # unable to generate a certificate for the domains [hh-hasura.apisnoop.io]: + # acme: Error -> One or more domains had a problem:\n[hh-hasura.apisnoop.io] + # acme: error: 403 :: urn:ietf:params:acme:err or:unauthorized :: + # Invalid response from https://hh-hasura.apisnoop.io/.well-known/acme-challenge/2znqGrOWczcTMbLmN5NVm2OwcpQGT_ViPhEoJOpKQb8 + # [35.189.56.228]: 404, ur l: \n + challengeType: tls-alpn-01 + # challengeType: dns-01 # Needed for wildcards + resolvers: + - 1.1.1.1:53 + - 8.8.8.8:53 + persistence: + # We don't want helm to delete our pvc + # https://github.com/helm/helm/issues/6261#issuecomment-523472128 + annotations: + helm.sh/resource-policy: "keep" + enable: true + storageClass: standard + accessMode: ReadWriteOnce + size: 1Gi + # only use if claim already exists + # existingClaim: ii-traefik-acme + # domains: + # enabled: false + # domainsList: + # - main: "*.apisnoop.io" + # - sans: + # - "traefik.apisnoop.io" + # - "hh-apisnoop.apisnoop.io" + # - "zz-apisnoop.apisnoop.io" + # dnsProvider: + # # name: dnsimple + # dnsimple: + # DNSIMPLE_OAUTH_TOKEN: "" + # DNSIMPLE_BASE_URL: "https://api.dnsimple.com/v2/" +#+END_SRC + +** 
rook-ingress.yaml +#+NAME: traefik.yaml helm values +#+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:rook-ingress.yaml :noweb yes + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + name: rook-ceph-mrg-dashboard + spec: + rules: + - host: rook.ii.nz + http: + paths: + - backend: + serviceName: rook-ceph-mgr-dashboard + servicePort: dashboard +#+END_SRC + +** nginx-cephfs-pvc.yaml + #+NAME: nginx-cephfs-pc.yaml + #+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:nginx-cephfs-pvc.yaml :noweb yes + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: html-content + spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi + storageClassName: cephfs + #+END_SRC +** nginx-deployment.yaml + #+NAME: nginx-deployment.yaml + #+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:nginx-deployment.yaml :noweb yes + apiVersion: apps/v1 + kind: Deployment + metadata: + name: nginx + labels: + app: nginx + spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + volumes: + - name: html-content + persistentVolumeClaim: + claimName: html-content + readOnly: false + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 + volumeMounts: + - name: html-content + mountPath: /var/lib/registry + #+END_SRC +** nginx-service.yaml + #+NAME: kubeadm-config.yaml + #+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:nginx-service.yaml :noweb yes + apiVersion: v1 + kind: Service + metadata: + name: nginx-service + spec: + selector: + app: nginx + type: NodePort + ports: + - protocol: TCP + port: 80 + targetPort: 80 + #+END_SRC +** nginx-ingress.yaml +#+NAME: nginx-ingress.yaml helm values +#+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:nginx-ingress.yaml :noweb yes + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + name: ii-web + spec: + rules: + - host: web.pi.ii.nz + http: + paths: + - backend: + serviceName: nginx-service + servicePort: 80 +#+END_SRC + 
+** cephfs.yaml +#+NAME: cephfs.yaml +#+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:cephfs.yaml :noweb yes +apiVersion: ceph.rook.io/v1 +kind: CephFilesystem +metadata: + name: iifs + namespace: rook-ceph +spec: + metadataPool: + replicated: + size: 1 + dataPools: + - failureDomain: osd + replicated: + size: 1 + preservePoolsOnDelete: true + metadataServer: + activeCount: 1 + activeStandby: true +#+END_SRC + +** cephfs-storage-class.yaml +#+NAME: cephfs-storage-class.yaml +#+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:cephfs-storage-class.yaml :noweb yes + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: cephfs + provisioner: rook-ceph.cephfs.csi.ceph.com + parameters: + # clusterID is the namespace where operator is deployed. + clusterID: rook-ceph + + # CephFS filesystem name into which the volume shall be created + fsName: iifs + + # Ceph pool into which the volume shall be created + # Required for provisionVolume: "true" + pool: ii-block-pool + + # Root path of an existing CephFS volume + # Required for provisionVolume: "false" + # rootPath: /absolute/path + + # The secrets contain Ceph admin credentials. These are generated automatically by the operator + # in the same namespace as the cluster. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + + # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) + # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse + # or by setting the default mounter explicitly via --volumemounter command-line argument. 
+ # mounter: kernel + reclaimPolicy: Delete + mountOptions: + # uncomment the following line for debugging + #- debug +#+END_SRC + +* Steps + If you run them all, at about ~10 minutes you should nsee a ceph-osd-prepare-ubuntu job/pod. +This creates the ceph lvm out of /dev/sda +Assuming it doesn't have any other partittions on it. +** cluster up + #+begin_src shell + <> + <> + <> + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + #+end_EXAMPLE + +** install rook + traefik +These need to be installed first, as they provide CRDs for the remaining rook/pvc objects. + #+begin_src shell + <> + <> + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + namespace/rook-ceph created + NAME: rook-ceph + LAST DEPLOYED: Thu Jan 2 08:15:13 2020 + NAMESPACE: rook-ceph + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + The Rook Operator has been installed. Check its status by running: + kubectl --namespace rook-ceph get pods -l "app=rook-ceph-operator" + + Visit https://rook.io/docs/rook/master for instructions on how to create and configure Rook clusters + + Note: You cannot just create a CephCluster resource, you need to also create a namespace and + install suitable RBAC roles and role bindings for the cluster. The Rook Operator will not do + this for you. Sample CephCluster manifest templates that include RBAC resources are available: + + - https://rook.github.io/docs/rook/master/ceph-quickstart.html + - https://github.com/rook/rook/blob/master/cluster/examples/kubernetes/ceph/cluster.yaml + + Important Notes: + - The links above are for the unreleased master version, if you deploy a different release you must find matching manifests. + - You must customise the 'CephCluster' resource at the bottom of the sample manifests to met your situation. + - Each CephCluster must be deployed to its own namespace, the samples use `rook-ceph` for the cluster. + - The sample manifests assume you also installed the rook-ceph operator in the `rook-ceph` namespace. 
+ - The helm chart includes all the RBAC required to create a CephCluster CRD in the same namespace. + - Any disk devices you add to the cluster in the 'CephCluster' must be empty (no filesystem and no partitions). + - In the 'CephCluster' you must refer to disk devices by their '/dev/something' name, e.g. 'sdb' or 'xvde'. + namespace/traefik created + NAME: ii-traefik + LAST DEPLOYED: Thu Jan 2 08:15:22 2020 + NAMESPACE: traefik + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + 1. Get Traefik's load balancer IP/hostname: + + NOTE: It may take a few minutes for this to become available. + + You can watch the status by running: + + $ kubectl get svc ii-traefik --namespace traefik -w + + Once 'EXTERNAL-IP' is no longer '': + + $ kubectl describe svc ii-traefik --namespace traefik | grep Ingress | awk '{print $3}' + + 2. Configure DNS records corresponding to Kubernetes ingress resources to point to the load balancer IP/hostname found in step 1 + #+end_EXAMPLE + +** deploy and install + #+begin_src shell + <> + <> + <> + <> + <> + <> + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + serviceaccount/weave-net created + clusterrole.rbac.authorization.k8s.io/weave-net created + clusterrolebinding.rbac.authorization.k8s.io/weave-net created + role.rbac.authorization.k8s.io/weave-net created + rolebinding.rbac.authorization.k8s.io/weave-net created + daemonset.apps/weave-net created + cephcluster.ceph.rook.io/rook-ceph created + storageclass.storage.k8s.io/standard created + deployment.apps/rook-ceph-tools created + #+end_EXAMPLE + +** kubeadm init + #+name: kubeadm init + #+begin_src shell :async t + sudo kubeadm init --config kubeadm-config.yaml + #+end_src + + #+RESULTS: kubeadm init + #+begin_EXAMPLE + W0101 08:01:49.858946 3638 common.go:77] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta1". 
Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version. + W0101 08:01:49.860637 3638 common.go:77] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta1". Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version. + W0101 08:01:49.864775 3638 validation.go:28] Cannot validate kube-proxy config - no validator is available + W0101 08:01:49.864823 3638 validation.go:28] Cannot validate kubelet config - no validator is available + [init] Using Kubernetes version: v1.17.0 + [preflight] Running pre-flight checks + [WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service' + [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/ + [preflight] Pulling images required for setting up a Kubernetes cluster + [preflight] This might take a minute or two, depending on the speed of your internet connection + [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' + [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" + [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" + [kubelet-start] Starting the kubelet + [certs] Using certificateDir folder "/etc/kubernetes/pki" + [certs] Generating "ca" certificate and key + [certs] Generating "apiserver" certificate and key + [certs] apiserver serving cert is signed for DNS names [ubuntu kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.101] + [certs] Generating "apiserver-kubelet-client" certificate and key + [certs] Generating "front-proxy-ca" certificate and key + [certs] Generating 
"front-proxy-client" certificate and key + [certs] Generating "etcd/ca" certificate and key + [certs] Generating "etcd/server" certificate and key + [certs] etcd/server serving cert is signed for DNS names [ubuntu localhost] and IPs [192.168.1.101 127.0.0.1 ::1] + [certs] Generating "etcd/peer" certificate and key + [certs] etcd/peer serving cert is signed for DNS names [ubuntu localhost] and IPs [192.168.1.101 127.0.0.1 ::1] + [certs] Generating "etcd/healthcheck-client" certificate and key + [certs] Generating "apiserver-etcd-client" certificate and key + [certs] Generating "sa" key and public key + [kubeconfig] Using kubeconfig folder "/etc/kubernetes" + [kubeconfig] Writing "admin.conf" kubeconfig file + [kubeconfig] Writing "kubelet.conf" kubeconfig file + [kubeconfig] Writing "controller-manager.conf" kubeconfig file + [kubeconfig] Writing "scheduler.conf" kubeconfig file + [control-plane] Using manifest folder "/etc/kubernetes/manifests" + [control-plane] Creating static Pod manifest for "kube-apiserver" + [control-plane] Creating static Pod manifest for "kube-controller-manager" + W0101 08:02:04.870516 3638 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC" + [control-plane] Creating static Pod manifest for "kube-scheduler" + W0101 08:02:04.879957 3638 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC" + [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" + [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s + [kubelet-check] Initial timeout of 40s passed. 
+ [apiclient] All control plane components are healthy after 46.511568 seconds + [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace + [kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster + [upload-certs] Skipping phase. Please see --upload-certs + [mark-control-plane] Marking the node ubuntu as control-plane by adding the label "node-role.kubernetes.io/master=''" + [bootstrap-token] Using token: p7v81s.8coeumseuna5t7o9 + [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles + [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials + [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token + [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster + [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace + [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key + [addons] Applied essential addon: CoreDNS + [addons] Applied essential addon: kube-proxy + + Your Kubernetes control-plane has initialized successfully! + + To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + + You should now deploy a pod network to the cluster. 
+ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + + Then you can join any number of worker nodes by running the following on each as root: + + kubeadm join 192.168.1.101:6443 --token rfmdm3.7qgj0l72m3c7ol9d \ + --discovery-token-ca-cert-hash sha256:aa68bdc1de848cf6efed7b690052f621336bb2743f490abc93efa778c5a05440 + #+end_EXAMPLE + +** copy new kubeconfig into place +file:~/.kube/config + #+NAME: cp kubeconfig + #+begin_src shell :results silent + mkdir -p $HOME/.kube + sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + #+end_src +** scp kubeconfig + #+NAME: scp kubeconfig + #+begin_src shell :results silent :dir ~/ + scp ubuntu@192.168.1.101:.kube/config $HOME/.kube/config + #+end_src +** apply cni-weaveworks +https://www.weave.works/docs/net/latest/kubernetes/kube-addon/ + #+name: apply cni-weaveworks + #+begin_src shell + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + #+end_src + + #+RESULTS: apply cni-weaveworks + #+begin_EXAMPLE + serviceaccount/weave-net created + clusterrole.rbac.authorization.k8s.io/weave-net created + clusterrolebinding.rbac.authorization.k8s.io/weave-net created + role.rbac.authorization.k8s.io/weave-net created + rolebinding.rbac.authorization.k8s.io/weave-net created + daemonset.apps/weave-net created + #+end_EXAMPLE + +** install rook operator + #+name: install rook operator + #+begin_src shell + kubectl create ns rook-ceph + helm install rook-ceph --namespace rook-ceph rook-release/rook-ceph -f rook.yaml + #+end_src + +** apply cephcluster CRD + This takes a while, the crashcollector needs a secret that doesn't seem to be created until after 3/4 minutes. 
+ #+name: apply cephcluster CRD + #+begin_src shell + kubectl apply -f rook-cluster.yaml + # kubectl delete -f rook-cluster.yaml + #+end_src + +** apply ceph-block-pool + + ceph-osd-prepare container starts about now + #+name: apply ceph-block-pool + #+begin_src shell + kubectl apply -f ceph-block-pool.yaml + #+end_src + + #+RESULTS: apply ceph-block-pool + #+begin_EXAMPLE + cephblockpool.ceph.rook.io/ii-block-pool created + #+end_EXAMPLE + +** apply storage-class + #+name: apply storage-class + #+begin_src shell + # kubectl delete -f storage-class.yaml + kubectl apply -f storage-class.yaml + #+end_src + + #+RESULTS: apply storage-class + #+begin_EXAMPLE + storageclass.storage.k8s.io/standard configured + #+end_EXAMPLE + + #+RESULTS: + #+begin_EXAMPLE + storageclass.storage.k8s.io/standard created + #+end_EXAMPLE + +** apply rook-tools + #+name: apply rook-tools + #+begin_src shell + kubectl apply -f rook-tools.yaml + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + deployment.apps/rook-ceph-tools created + #+end_EXAMPLE +** apply rook-ingress + #+name: apply rook-ingress + #+begin_src shell + kubectl apply -f rook-ingress.yaml -n rook-ceph + #+end_src + + #+RESULTS: apply rook-ingress + #+begin_EXAMPLE + ingress.extensions/rook-ceph-mrg-dashboard created + #+end_EXAMPLE + +** apply cephfs + #+name: apply cephfs + #+begin_src shell + kubectl apply -f cephfs.yaml -n rook-ceph + #+end_src + + #+RESULTS: apply cephfs + #+begin_EXAMPLE + cephfilesystem.ceph.rook.io/iifs created + #+end_EXAMPLE + +** apply cephfs-storage-class + #+name: apply cephfs-storage-class + #+begin_src shell + kubectl apply -f cephfs-storage-class.yaml -n rook-ceph + #+end_src + + #+RESULTS: apply cephfs-storage-class + #+begin_EXAMPLE + storageclass.storage.k8s.io/cephfs created + #+end_EXAMPLE + +** install traefik + #+name: install traefik + #+begin_src shell + kubectl create namespace traefik + helm install \ + ii-traefik \ + --namespace traefik \ + --values $HOME/traefik-1.7-config.yaml \ + 
stable/traefik + # --values $HOME/traefik-config.yaml \ + # $HOME/traefik-helm-chart + #+end_src + +** deploy nginx + This takes a while, the crashcollector needs a secret that doesn't seem to be created until after 3/4 minutes. + #+name: deploy nginx + #+begin_src shell + kubectl apply -f nginx-cephfs-pvc.yaml + kubectl apply -f nginx-deployment.yaml + kubectl apply -f nginx-service.yaml + kubectl apply -f nginx-ingress.yaml + #+end_src + + #+RESULTS: deploy nginx + #+begin_EXAMPLE + persistentvolumeclaim/html-content created + deployment.apps/nginx configured + service/nginx-service unchanged + ingress.extensions/ii-web unchanged + #+end_EXAMPLE +* Installing htpasswd + #+begin_src shell + sudo apt-get install -y apache2-utils + #+end_src + +* Explore +** get a list of crds created by rook-ceph + #+begin_src shell + kubectl get crd + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + NAME CREATED AT + cephblockpools.ceph.rook.io 2020-01-01T08:03:33Z + cephclients.ceph.rook.io 2020-01-01T08:03:33Z + cephclusters.ceph.rook.io 2020-01-01T08:03:33Z + cephfilesystems.ceph.rook.io 2020-01-01T08:03:33Z + cephnfses.ceph.rook.io 2020-01-01T08:03:33Z + cephobjectstores.ceph.rook.io 2020-01-01T08:03:33Z + cephobjectstoreusers.ceph.rook.io 2020-01-01T08:03:33Z + objectbucketclaims.objectbucket.io 2020-01-01T08:03:33Z + objectbuckets.objectbucket.io 2020-01-01T08:03:33Z + volumes.rook.io 2020-01-01T08:03:33Z + volumesnapshotclasses.snapshot.storage.k8s.io 2020-01-02T08:00:04Z + volumesnapshotcontents.snapshot.storage.k8s.io 2020-01-02T08:00:04Z + volumesnapshots.snapshot.storage.k8s.io 2020-01-02T08:00:04Z + #+end_EXAMPLE +** describe pod/rook-ceph-operator + #+begin_src shell + ROOT_OP_POD=$(kubectl --namespace rook-ceph get pods -l "app=rook-ceph-operator" -o name) + kubectl describe --namespace rook-ceph $ROOT_OP_POD + #+end_src +** get pod/rook-ceph-operator + + #+begin_src shell + ROOT_OP_POD=$(kubectl --namespace rook-ceph get pods -l "app=rook-ceph-operator" -o name) + 
kubectl get --namespace rook-ceph $ROOT_OP_POD + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + NAME READY STATUS RESTARTS AGE + rook-ceph-operator-5cf57b4fd7-v44rf 1/1 Running 0 7m56s + #+end_EXAMPLE +** get cephclusters + + #+begin_src shell + kubectl get cephclusters.ceph.rook.io --namespace=rook-ceph + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + NAME DATADIRHOSTPATH MONCOUNT AGE STATE HEALTH + rook-ceph /var/lib/rook 1 3d10h Created HEALTH_WARN + #+end_EXAMPLE +** get rook-ceph services + #+begin_src shell + kubectl get service --namespace=rook-ceph + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + csi-cephfsplugin-metrics ClusterIP 10.96.128.28 8080/TCP,8081/TCP 97m + csi-rbdplugin-metrics ClusterIP 10.96.135.214 8080/TCP,8081/TCP 97m + rook-ceph-mgr ClusterIP 10.96.48.184 9283/TCP 87m + rook-ceph-mgr-dashboard ClusterIP 10.96.229.60 7000/TCP 93m + rook-ceph-mon-a ClusterIP 10.96.136.146 6789/TCP,3300/TCP 96m + #+end_EXAMPLE +** get rook-ceph pods + #+begin_src shell + kubectl get pods --namespace=rook-ceph + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + NAME READY STATUS RESTARTS AGE + csi-cephfsplugin-58xhs 3/3 Running 0 98m + csi-cephfsplugin-provisioner-56c8b7ddf4-4gvvt 4/4 Running 0 98m + csi-cephfsplugin-provisioner-56c8b7ddf4-xrmc2 4/4 Running 0 98m + csi-rbdplugin-kj2tq 3/3 Running 0 98m + csi-rbdplugin-provisioner-6ff4dd4b94-b56jd 5/5 Running 1 98m + csi-rbdplugin-provisioner-6ff4dd4b94-h7vss 5/5 Running 1 98m + rook-ceph-crashcollector-ubuntu-5df5c69d4b-r7b5m 1/1 Running 0 93m + rook-ceph-mgr-a-9b8cc4c58-r5wvb 1/1 Running 1 93m + rook-ceph-mon-a-64c6dc5dc9-knc9l 1/1 Running 0 97m + rook-ceph-operator-5cf57b4fd7-dq586 1/1 Running 0 98m + rook-ceph-osd-prepare-ubuntu-ljfvl 0/1 Completed 0 87m + rook-discover-n9cvl 1/1 Running 0 98m + #+end_EXAMPLE +** free memory + #+begin_src shell + free -m + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + total used free shared buff/cache available + Mem: 3791 1765 397 8 1628 
2143 + Swap: 0 0 0 + #+end_EXAMPLE +** get ceph dashboard password + #+name: dashboard password + #+begin_src shell :results silent + kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo + #+end_src + + #+name: port forward to access dashboard + #+begin_src shell + kubectl port-forward -n rook-ceph service/rook-ceph-mgr-dashboard 7000 + #+end_src +** describe pod/traefik + #+begin_src shell + ROOT_OP_POD=$(kubectl --namespace rook-ceph get pods -l "app=rook-ceph-operator" -o name) + kubectl describe --namespace rook-ceph $ROOT_OP_POD + #+end_src +** copy html to web.pi.ii.nz + #+begin_src shell :dir "." + NGINX_POD=$(kubectl get pods -l "app=nginx" -o name | sed s:pod/::) + kubectl cp pik8s.html $NGINX_POD:/usr/share/nginx/html/index.html + #+end_src + +* Understanding why the PVC isn't create +** get traefik pvc + #+begin_src shell :wrap "src json" + kubectl get pvc -n traefik ii-traefik-acme -o json + #+end_src + + #+RESULTS: + #+begin_src json + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "annotations": { + "helm.sh/resource-policy": "keep", + "volume.beta.kubernetes.io/storage-provisioner": "rook-ceph.rbd.csi.ceph.com" + }, + "creationTimestamp": "2019-12-31T20:51:29Z", + "finalizers": [ + "kubernetes.io/pvc-protection" + ], + "labels": { + "app": "traefik", + "chart": "traefik-1.85.0", + "heritage": "Helm", + "release": "ii-traefik" + }, + "name": "ii-traefik-acme", + "namespace": "traefik", + "resourceVersion": "3413", + "selfLink": "/api/v1/namespaces/traefik/persistentvolumeclaims/ii-traefik-acme", + "uid": "79b47f05-e5b6-4645-bd09-228cccb2f61e" + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "1Gi" + } + }, + "storageClassName": "standard", + "volumeMode": "Filesystem" + }, + "status": { + "phase": "Pending" + } + } + #+end_src + +** describe traefik pvc + #+begin_src shell :wrap "src json" + 
kubectl describe -n traefik pvc/ii-traefik-acme + #+end_src + + #+RESULTS: + #+begin_src json + Name: ii-traefik-acme + Namespace: traefik + StorageClass: standard + Status: Pending + Volume: + Labels: app=traefik + chart=traefik-1.85.0 + heritage=Helm + release=ii-traefik + Annotations: helm.sh/resource-policy: keep + volume.beta.kubernetes.io/storage-provisioner: rook-ceph.rbd.csi.ceph.com + Finalizers: [kubernetes.io/pvc-protection] + Capacity: + Access Modes: + VolumeMode: Filesystem + Mounted By: ii-traefik-59db7c8bdc-48nz5 + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ExternalProvisioning 4m54s (x622 over 160m) persistentvolume-controller waiting for a volume to be created, either by external provisioner "rook-ceph.rbd.csi.ceph.com" or manually created by system administrator + Normal Provisioning 70s (x41 over 160m) rook-ceph.rbd.csi.ceph.com_csi-rbdplugin-provisioner-6ff4dd4b94-fhrnr_7626e9a6-e923-4659-9885-fa7eb7755b8c External provisioner is provisioning volume for claim "traefik/ii-traefik-acme" + #+end_src + +** get pods + #+begin_src shell + kubectl get pods --namespace rook-ceph + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + NAME READY STATUS RESTARTS AGE + csi-cephfsplugin-provisioner-56c8b7ddf4-dfh7k 4/4 Running 0 166m + csi-cephfsplugin-provisioner-56c8b7ddf4-z9279 4/4 Running 0 166m + csi-cephfsplugin-tgsk5 3/3 Running 0 166m + csi-rbdplugin-provisioner-6ff4dd4b94-4tw42 5/5 Running 1 166m + csi-rbdplugin-provisioner-6ff4dd4b94-fhrnr 5/5 Running 0 166m + csi-rbdplugin-vmn8h 3/3 Running 0 166m + rook-ceph-crashcollector-ubuntu-5df5c69d4b-czw28 1/1 Running 0 163m + rook-ceph-mgr-a-648b49bb98-q4dq4 1/1 Running 0 163m + rook-ceph-mon-a-78866995b5-gjhb8 1/1 Running 0 165m + rook-ceph-operator-5cf57b4fd7-lknjh 1/1 Running 0 168m + rook-ceph-osd-prepare-ubuntu-lqsxx 1/1 Running 0 160m + rook-ceph-tools-75498b5cfc-p2ppl 1/1 Running 0 165m + rook-discover-26g8l 1/1 Running 0 168m + #+end_EXAMPLE + +* rook-tools + 
:PROPERTIES: + :header-args:shell+: :dir ~/ + :header-args:shell+: :prologue "kubectl -n rook-ceph exec -i `kubectl -n rook-ceph get pod -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}' ` bash\n(" + :header-args:shell+: :epilogue ") 2>&1\n:\n" + :END: + #+begin_src shell :prologue "kubectl -n rook-ceph exec -i `kubectl -n rook-ceph get pod -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}' ` bash" :epilogue "" +** ceph commands + #+name: ceph commands + #+begin_src shell :var COMMAND="ceph status" + kubectl -n rook-ceph exec -it \ + $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') \ + -- ceph -h | grep -i list + #+end_src + + #+RESULTS: ceph commands + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + daemonperf {type.id | path} list|ls [stat-pats] [priority] + List shows a table of all available stats + auth ls list authentication state + balancer ls List all plans + balancer pool ls List automatic balancing pools. Note + that empty list means all existing + config ls List available configuration options + config-key ls list keys + dashboard iscsi-gateway-list List iSCSI gateways + fs ls list filesystems + fs subvolume ls { List subvolume snapshots + fs subvolumegroup ls List subvolumegroups + fs subvolumegroup snapshot ls [. 
List devices on a node + orchestrator host ls List hosts + orchestrator service ls {} {mon| List services known to orchestrator + osd blacklist add|rm add (optionally until seconds + blacklist + osd blacklist clear clear all blacklisted clients + osd blacklist ls show blacklisted clients + osd crush class ls list all crush device classes + osd crush class ls-osd list all osds belonging to the specific + osd crush ls list items beneath a node in the CRUSH + osd crush rule ls list crush rules + osd crush rule ls-by-class list all crush rules that reference the + osd crush weight-set ls list crush weight sets + osd erasure-code-profile ls list all erasure code profiles + osd pool ls {detail} list pools + pg ls {} { [...]} list pg with specific pool, osd, state + pg ls-by-osd list pg on osd [osd] + pg ls-by-pool { list pg with pool = [poolname] + pg ls-by-primary list pg with primary = [osd] + rbd task list {} List pending or running asynchronous + restful list-keys List all API keys + #+end_EXAMPLE + + +** rook-tool code block + #+name: rook-tool + #+begin_src shell :var COMMAND="ceph status" + kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') \ + $COMMAND 2>&1 + #ceph df + # rados df + #+end_src + +** ceph config ls + #+begin_src shell + ceph config ls + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + host + fsid + public_addr + public_bind_addr + cluster_addr + public_network + public_network_interface + cluster_network + cluster_network_interface + monmap + mon_host + mon_dns_srv_name + lockdep + lockdep_force_backtrace + run_dir + admin_socket + admin_socket_mode + daemonize + setuser + setgroup + setuser_match_path + pid_file + chdir + fatal_signal_handlers + crash_dir + restapi_log_level + restapi_base_url + erasure_code_dir + log_file + log_max_new + log_max_recent + log_to_file + log_to_stderr + err_to_stderr + log_stderr_prefix + log_to_syslog + err_to_syslog + log_flush_on_exit + 
log_stop_at_utilization + log_to_graylog + err_to_graylog + log_graylog_host + log_graylog_port + log_coarse_timestamps + clog_to_monitors + clog_to_syslog + clog_to_syslog_level + clog_to_syslog_facility + clog_to_graylog + clog_to_graylog_host + clog_to_graylog_port + mon_cluster_log_to_stderr + mon_cluster_log_to_syslog + mon_cluster_log_to_syslog_level + mon_cluster_log_to_syslog_facility + mon_cluster_log_to_file + mon_cluster_log_file + mon_cluster_log_file_level + mon_cluster_log_to_graylog + mon_cluster_log_to_graylog_host + mon_cluster_log_to_graylog_port + enable_experimental_unrecoverable_data_corrupting_features + plugin_dir + xio_trace_mempool + xio_trace_msgcnt + xio_trace_xcon + xio_queue_depth + xio_mp_min + xio_mp_max_64 + xio_mp_max_256 + xio_mp_max_1k + xio_mp_max_page + xio_mp_max_hint + xio_portal_threads + xio_max_conns_per_portal + xio_transport_type + xio_max_send_inline + compressor_zlib_isal + compressor_zlib_level + qat_compressor_enabled + plugin_crypto_accelerator + mempool_debug + key + keyfile + keyring + heartbeat_interval + heartbeat_file + heartbeat_inject_failure + perf + ms_type + ms_public_type + ms_cluster_type + ms_mon_cluster_mode + ms_mon_service_mode + ms_mon_client_mode + ms_cluster_mode + ms_service_mode + ms_client_mode + ms_learn_addr_from_peer + ms_tcp_nodelay + ms_tcp_rcvbuf + ms_tcp_prefetch_max_size + ms_initial_backoff + ms_max_backoff + ms_crc_data + ms_crc_header + ms_die_on_bad_msg + ms_die_on_unhandled_msg + ms_die_on_old_message + ms_die_on_skipped_message + ms_die_on_bug + ms_dispatch_throttle_bytes + ms_msgr2_sign_messages + ms_msgr2_encrypt_messages + ms_bind_ipv4 + ms_bind_ipv6 + ms_bind_prefer_ipv4 + ms_bind_msgr1 + ms_bind_msgr2 + ms_bind_port_min + ms_bind_port_max + ms_bind_retry_count + ms_bind_retry_delay + ms_bind_before_connect + ms_tcp_listen_backlog + ms_rwthread_stack_bytes + ms_connection_ready_timeout + ms_connection_idle_timeout + ms_pq_max_tokens_per_priority + ms_pq_min_cost + 
ms_inject_socket_failures + ms_inject_delay_type + ms_inject_delay_msg_type + ms_inject_delay_max + ms_inject_delay_probability + ms_inject_internal_delays + ms_dump_on_send + ms_dump_corrupt_message_level + ms_async_op_threads + ms_async_max_op_threads + ms_async_rdma_device_name + ms_async_rdma_enable_hugepage + ms_async_rdma_buffer_size + ms_async_rdma_send_buffers + ms_async_rdma_receive_buffers + ms_async_rdma_receive_queue_len + ms_async_rdma_support_srq + ms_async_rdma_port_num + ms_async_rdma_polling_us + ms_async_rdma_local_gid + ms_async_rdma_roce_ver + ms_async_rdma_sl + ms_async_rdma_dscp + ms_max_accept_failures + ms_async_rdma_cm + ms_async_rdma_type + ms_dpdk_port_id + ms_dpdk_coremask + ms_dpdk_memory_channel + ms_dpdk_hugepages + ms_dpdk_pmd + ms_dpdk_host_ipv4_addr + ms_dpdk_gateway_ipv4_addr + ms_dpdk_netmask_ipv4_addr + ms_dpdk_lro + ms_dpdk_hw_flow_control + ms_dpdk_hw_queue_weight + ms_dpdk_debug_allow_loopback + ms_dpdk_rx_buffer_count_per_core + inject_early_sigterm + mon_enable_op_tracker + mon_op_complaint_time + mon_op_log_threshold + mon_op_history_size + mon_op_history_duration + mon_op_history_slow_op_size + mon_op_history_slow_op_threshold + mon_data + mon_initial_members + mon_compact_on_start + mon_compact_on_bootstrap + mon_compact_on_trim + mon_osdmap_full_prune_enabled + mon_osdmap_full_prune_min + mon_osdmap_full_prune_interval + mon_osdmap_full_prune_txsize + mon_osd_cache_size + mon_osd_cache_size_min + mon_memory_target + mon_memory_autotune + mon_cpu_threads + mon_osd_mapping_pgs_per_chunk + mon_clean_pg_upmaps_per_chunk + mon_osd_max_creating_pgs + mon_osd_max_initial_pgs + mon_tick_interval + mon_session_timeout + mon_subscribe_interval + mon_delta_reset_interval + mon_osd_laggy_halflife + mon_osd_laggy_weight + mon_osd_laggy_max_interval + mon_osd_adjust_heartbeat_grace + mon_osd_adjust_down_out_interval + mon_osd_auto_mark_in + mon_osd_auto_mark_auto_out_in + mon_osd_auto_mark_new_in + mon_osd_destroyed_out_interval + 
mon_osd_down_out_interval + mon_osd_down_out_subtree_limit + mon_osd_min_up_ratio + mon_osd_min_in_ratio + mon_osd_warn_op_age + mon_osd_err_op_age_ratio + mon_osd_prime_pg_temp + mon_osd_prime_pg_temp_max_time + mon_osd_prime_pg_temp_max_estimate + mon_stat_smooth_intervals + mon_election_timeout + mon_lease + mon_lease_renew_interval_factor + mon_lease_ack_timeout_factor + mon_accept_timeout_factor + mon_clock_drift_allowed + mon_clock_drift_warn_backoff + mon_timecheck_interval + mon_timecheck_skew_interval + mon_pg_stuck_threshold + mon_pg_warn_min_per_osd + mon_max_pg_per_osd + mon_target_pg_per_osd + mon_pg_warn_max_object_skew + mon_pg_warn_min_objects + mon_pg_warn_min_pool_objects + mon_pg_check_down_all_threshold + mon_cache_target_full_warn_ratio + mon_osd_full_ratio + mon_osd_backfillfull_ratio + mon_osd_nearfull_ratio + mon_osd_initial_require_min_compat_client + mon_allow_pool_delete + mon_fake_pool_delete + mon_globalid_prealloc + mon_osd_report_timeout + mon_warn_on_msgr2_not_enabled + mon_warn_on_legacy_crush_tunables + mon_crush_min_required_version + mon_warn_on_crush_straw_calc_version_zero + mon_warn_on_osd_down_out_interval_zero + mon_warn_on_cache_pools_without_hit_sets + mon_warn_on_pool_no_app + mon_warn_on_misplaced + mon_warn_on_too_few_osds + mon_warn_on_slow_ping_time + mon_warn_on_slow_ping_ratio + mon_max_snap_prune_per_epoch + mon_min_osdmap_epochs + mon_max_log_epochs + mon_max_mdsmap_epochs + mon_max_mgrmap_epochs + mon_max_osd + mon_probe_timeout + mon_client_bytes + mon_daemon_bytes + mon_mgr_proxy_client_bytes_ratio + mon_log_max_summary + mon_max_log_entries_per_event + mon_reweight_min_pgs_per_osd + mon_reweight_min_bytes_per_osd + mon_reweight_max_osds + mon_reweight_max_change + mon_health_to_clog + mon_health_to_clog_interval + mon_health_to_clog_tick_interval + mon_health_max_detail + mon_health_log_update_period + mon_data_avail_crit + mon_data_avail_warn + mon_data_size_warn + mon_warn_pg_not_scrubbed_ratio + 
mon_warn_pg_not_deep_scrubbed_ratio + mon_scrub_interval + mon_scrub_timeout + mon_scrub_max_keys + mon_scrub_inject_crc_mismatch + mon_scrub_inject_missing_keys + mon_config_key_max_entry_size + mon_sync_timeout + mon_sync_max_payload_size + mon_sync_debug + mon_inject_sync_get_chunk_delay + mon_osd_min_down_reporters + mon_osd_reporter_subtree_level + mon_osd_snap_trim_queue_warn_on + mon_osd_force_trim_to + mon_mds_force_trim_to + mon_mds_skip_sanity + mon_debug_extra_checks + mon_debug_block_osdmap_trim + mon_debug_deprecated_as_obsolete + mon_debug_dump_transactions + mon_debug_dump_json + mon_debug_dump_location + mon_debug_no_require_mimic + mon_debug_no_require_nautilus + mon_debug_no_require_bluestore_for_ec_overwrites + mon_debug_no_initial_persistent_features + mon_inject_transaction_delay_max + mon_inject_transaction_delay_probability + mon_inject_pg_merge_bounce_probability + mon_sync_provider_kill_at + mon_sync_requester_kill_at + mon_force_quorum_join + mon_keyvaluedb + mon_debug_unsafe_allow_tier_with_nonempty_snaps + mon_osd_blacklist_default_expire + mon_mds_blacklist_interval + mon_osd_crush_smoke_test + mon_smart_report_timeout + paxos_stash_full_interval + paxos_max_join_drift + paxos_propose_interval + paxos_min_wait + paxos_min + paxos_trim_min + paxos_trim_max + paxos_service_trim_min + paxos_service_trim_max + paxos_kill_at + auth_cluster_required + auth_service_required + auth_client_required + auth_supported + max_rotating_auth_attempts + rotating_keys_bootstrap_timeout + rotating_keys_renewal_timeout + cephx_require_signatures + cephx_require_version + cephx_cluster_require_signatures + cephx_cluster_require_version + cephx_service_require_signatures + cephx_service_require_version + cephx_sign_messages + auth_mon_ticket_ttl + auth_service_ticket_ttl + auth_debug + mon_client_hunt_parallel + mon_client_hunt_interval + mon_client_ping_interval + mon_client_ping_timeout + mon_client_hunt_interval_backoff + 
mon_client_hunt_interval_min_multiple + mon_client_hunt_interval_max_multiple + mon_client_max_log_entries_per_message + mon_client_directed_command_retry + mon_max_pool_pg_num + mon_pool_quota_warn_threshold + mon_pool_quota_crit_threshold + crush_location + crush_location_hook + crush_location_hook_timeout + objecter_tick_interval + objecter_timeout + objecter_inflight_op_bytes + objecter_inflight_ops + objecter_completion_locks_per_session + objecter_inject_no_watch_ping + objecter_retry_writes_after_first_reply + objecter_debug_inject_relock_delay + filer_max_purge_ops + filer_max_truncate_ops + journaler_write_head_interval + journaler_prefetch_periods + journaler_prezero_periods + osd_calc_pg_upmaps_aggressively + osd_calc_pg_upmaps_max_stddev + osd_calc_pg_upmaps_local_fallback_retries + osd_numa_prefer_iface + osd_numa_auto_affinity + osd_numa_node + osd_smart_report_timeout + osd_check_max_object_name_len_on_startup + osd_max_backfills + osd_min_recovery_priority + osd_backfill_retry_interval + osd_recovery_retry_interval + osd_agent_max_ops + osd_agent_max_low_ops + osd_agent_min_evict_effort + osd_agent_quantize_effort + osd_agent_delay_time + osd_find_best_info_ignore_history_les + osd_agent_hist_halflife + osd_agent_slop + osd_uuid + osd_data + osd_journal + osd_journal_size + osd_journal_flush_on_shutdown + osd_os_flags + osd_max_write_size + osd_max_pgls + osd_client_message_size_cap + osd_client_message_cap + osd_crush_update_weight_set + osd_crush_chooseleaf_type + osd_pool_use_gmt_hitset + osd_crush_update_on_start + osd_class_update_on_start + osd_crush_initial_weight + osd_pool_default_ec_fast_read + osd_pool_default_crush_rule + osd_pool_erasure_code_stripe_unit + osd_pool_default_size + osd_pool_default_min_size + osd_pool_default_pg_num + osd_pool_default_pgp_num + osd_pool_default_type + osd_pool_default_erasure_code_profile + osd_erasure_code_plugins + osd_allow_recovery_below_min_size + osd_pool_default_flags + 
osd_pool_default_flag_hashpspool + osd_pool_default_flag_nodelete + osd_pool_default_flag_nopgchange + osd_pool_default_flag_nosizechange + osd_pool_default_hit_set_bloom_fpp + osd_pool_default_cache_target_dirty_ratio + osd_pool_default_cache_target_dirty_high_ratio + osd_pool_default_cache_target_full_ratio + osd_pool_default_cache_min_flush_age + osd_pool_default_cache_min_evict_age + osd_pool_default_cache_max_evict_check_size + osd_pool_default_pg_autoscale_mode + osd_hit_set_min_size + osd_hit_set_max_size + osd_hit_set_namespace + osd_tier_promote_max_objects_sec + osd_tier_promote_max_bytes_sec + osd_tier_default_cache_mode + osd_tier_default_cache_hit_set_count + osd_tier_default_cache_hit_set_period + osd_tier_default_cache_hit_set_type + osd_tier_default_cache_min_read_recency_for_promote + osd_tier_default_cache_min_write_recency_for_promote + osd_tier_default_cache_hit_set_grade_decay_rate + osd_tier_default_cache_hit_set_search_last_n + osd_objecter_finishers + osd_map_dedup + osd_map_cache_size + osd_map_message_max + osd_map_message_max_bytes + osd_map_share_max_epochs + osd_pg_epoch_max_lag_factor + osd_inject_bad_map_crc_probability + osd_inject_failure_on_pg_removal + osd_max_markdown_period + osd_max_markdown_count + osd_op_pq_max_tokens_per_priority + osd_op_pq_min_cost + osd_recover_clone_overlap + osd_op_num_threads_per_shard + osd_op_num_threads_per_shard_hdd + osd_op_num_threads_per_shard_ssd + osd_op_num_shards + osd_op_num_shards_hdd + osd_op_num_shards_ssd + osd_skip_data_digest + osd_op_queue + osd_op_queue_cut_off + osd_op_queue_mclock_client_op_res + osd_op_queue_mclock_client_op_wgt + osd_op_queue_mclock_client_op_lim + osd_op_queue_mclock_osd_rep_op_res + osd_op_queue_mclock_osd_rep_op_wgt + osd_op_queue_mclock_osd_rep_op_lim + osd_op_queue_mclock_snap_res + osd_op_queue_mclock_snap_wgt + osd_op_queue_mclock_snap_lim + osd_op_queue_mclock_recov_res + osd_op_queue_mclock_recov_wgt + osd_op_queue_mclock_recov_lim + 
osd_op_queue_mclock_scrub_res + osd_op_queue_mclock_scrub_wgt + osd_op_queue_mclock_scrub_lim + osd_op_queue_mclock_anticipation_timeout + osd_op_queue_mclock_pg_delete_res + osd_op_queue_mclock_pg_delete_wgt + osd_op_queue_mclock_pg_delete_lim + osd_op_queue_mclock_peering_event_res + osd_op_queue_mclock_peering_event_wgt + osd_op_queue_mclock_peering_event_lim + osd_ignore_stale_divergent_priors + osd_read_ec_check_for_errors + osd_recover_clone_overlap_limit + osd_debug_feed_pullee + osd_backfill_scan_min + osd_backfill_scan_max + osd_op_thread_timeout + osd_op_thread_suicide_timeout + osd_recovery_sleep + osd_recovery_sleep_hdd + osd_recovery_sleep_ssd + osd_recovery_sleep_hybrid + osd_snap_trim_sleep + osd_snap_trim_sleep_hdd + osd_snap_trim_sleep_ssd + osd_snap_trim_sleep_hybrid + osd_scrub_invalid_stats + osd_command_thread_timeout + osd_command_thread_suicide_timeout + osd_heartbeat_interval + osd_heartbeat_grace + osd_heartbeat_stale + osd_heartbeat_min_peers + osd_heartbeat_use_min_delay_socket + osd_heartbeat_min_size + osd_pg_max_concurrent_snap_trims + osd_max_trimming_pgs + osd_heartbeat_min_healthy_ratio + osd_mon_heartbeat_interval + osd_mon_heartbeat_stat_stale + osd_mon_report_interval + osd_mon_report_max_in_flight + osd_beacon_report_interval + osd_pg_stat_report_interval_max + osd_mon_ack_timeout + osd_stats_ack_timeout_factor + osd_stats_ack_timeout_decay + osd_max_snap_prune_intervals_per_epoch + osd_default_data_pool_replay_window + osd_auto_mark_unfound_lost + osd_recovery_delay_start + osd_recovery_max_active + osd_recovery_max_single_start + osd_recovery_max_chunk + osd_recovery_max_omap_entries_per_chunk + osd_copyfrom_max_chunk + osd_push_per_object_cost + osd_max_push_cost + osd_max_push_objects + osd_max_scrubs + osd_scrub_during_recovery + osd_repair_during_recovery + osd_scrub_begin_hour + osd_scrub_end_hour + osd_scrub_begin_week_day + osd_scrub_end_week_day + osd_scrub_load_threshold + osd_scrub_min_interval + 
osd_scrub_max_interval + osd_scrub_interval_randomize_ratio + osd_scrub_backoff_ratio + osd_scrub_chunk_min + osd_scrub_chunk_max + osd_scrub_sleep + osd_scrub_auto_repair + osd_scrub_auto_repair_num_errors + osd_scrub_max_preemptions + osd_deep_scrub_interval + osd_deep_scrub_randomize_ratio + osd_deep_scrub_stride + osd_deep_scrub_keys + osd_deep_scrub_update_digest_min_age + osd_deep_scrub_large_omap_object_key_threshold + osd_deep_scrub_large_omap_object_value_sum_threshold + osd_class_dir + osd_open_classes_on_start + osd_class_load_list + osd_class_default_list + osd_check_for_log_corruption + osd_use_stale_snap + osd_rollback_to_cluster_snap + osd_default_notify_timeout + osd_kill_backfill_at + osd_pg_epoch_persisted_max_stale + osd_min_pg_log_entries + osd_max_pg_log_entries + osd_pg_log_dups_tracked + osd_force_recovery_pg_log_entries_factor + osd_pg_log_trim_min + osd_force_auth_primary_missing_objects + osd_async_recovery_min_cost + osd_max_pg_per_osd_hard_ratio + osd_pg_log_trim_max + osd_op_complaint_time + osd_command_max_records + osd_max_pg_blocked_by + osd_op_log_threshold + osd_verify_sparse_read_holes + osd_backoff_on_unfound + osd_backoff_on_degraded + osd_backoff_on_peering + osd_debug_shutdown + osd_debug_crash_on_ignored_backoff + osd_debug_inject_dispatch_delay_probability + osd_debug_inject_dispatch_delay_duration + osd_debug_drop_ping_probability + osd_debug_drop_ping_duration + osd_debug_op_order + osd_debug_verify_missing_on_start + osd_debug_verify_snaps + osd_debug_verify_stray_on_activate + osd_debug_skip_full_check_in_backfill_reservation + osd_debug_reject_backfill_probability + osd_debug_inject_copyfrom_error + osd_debug_misdirected_ops + osd_debug_skip_full_check_in_recovery + osd_debug_random_push_read_error + osd_debug_verify_cached_snaps + osd_debug_deep_scrub_sleep + osd_debug_no_acting_change + osd_debug_no_purge_strays + osd_debug_pretend_recovery_active + osd_enable_op_tracker + osd_num_op_tracker_shard + 
osd_op_history_size + osd_op_history_duration + osd_op_history_slow_op_size + osd_op_history_slow_op_threshold + osd_target_transaction_size + osd_delete_sleep + osd_delete_sleep_hdd + osd_delete_sleep_ssd + osd_delete_sleep_hybrid + osd_failsafe_full_ratio + osd_fast_fail_on_connection_refused + osd_pg_object_context_cache_count + osd_tracing + osd_function_tracing + osd_fast_info + osd_debug_pg_log_writeout + osd_loop_before_reset_tphandle + threadpool_default_timeout + threadpool_empty_queue_max_wait + leveldb_log_to_ceph_log + leveldb_write_buffer_size + leveldb_cache_size + leveldb_block_size + leveldb_bloom_size + leveldb_max_open_files + leveldb_compression + leveldb_paranoid + leveldb_log + leveldb_compact_on_mount + kinetic_host + kinetic_port + kinetic_user_id + kinetic_hmac_key + kinetic_use_ssl + rocksdb_log_to_ceph_log + rocksdb_cache_size + rocksdb_cache_row_ratio + rocksdb_cache_shard_bits + rocksdb_cache_type + rocksdb_block_size + rocksdb_perf + rocksdb_collect_compaction_stats + rocksdb_collect_extended_stats + rocksdb_collect_memory_stats + rocksdb_enable_rmrange + rocksdb_max_items_rmrange + rocksdb_bloom_bits_per_key + rocksdb_cache_index_and_filter_blocks + rocksdb_cache_index_and_filter_blocks_with_high_priority + rocksdb_pin_l0_filter_and_index_blocks_in_cache + rocksdb_index_type + rocksdb_partition_filters + rocksdb_metadata_block_size + mon_rocksdb_options + osd_client_op_priority + osd_recovery_op_priority + osd_peering_op_priority + osd_snap_trim_priority + osd_snap_trim_cost + osd_pg_delete_priority + osd_pg_delete_cost + osd_scrub_priority + osd_scrub_cost + osd_requested_scrub_priority + osd_recovery_priority + osd_recovery_cost + osd_recovery_op_warn_multiple + osd_mon_shutdown_timeout + osd_shutdown_pgref_assert + osd_max_object_size + osd_max_object_name_len + osd_max_object_namespace_len + osd_max_attr_name_len + osd_max_attr_size + osd_max_omap_entries_per_request + osd_max_omap_bytes_per_request + osd_objectstore + 
osd_objectstore_tracing + osd_objectstore_fuse + osd_bench_small_size_max_iops + osd_bench_large_size_max_throughput + osd_bench_max_block_size + osd_bench_duration + osd_blkin_trace_all + osdc_blkin_trace_all + osd_discard_disconnected_ops + osd_memory_target + osd_memory_target_cgroup_limit_ratio + osd_memory_base + osd_memory_expected_fragmentation + osd_memory_cache_min + osd_memory_cache_resize_interval + memstore_device_bytes + memstore_page_set + memstore_page_size + objectstore_blackhole + bdev_debug_inflight_ios + bdev_inject_crash + bdev_inject_crash_flush_delay + bdev_aio + bdev_aio_poll_ms + bdev_aio_max_queue_depth + bdev_aio_reap_max + bdev_block_size + bdev_debug_aio + bdev_debug_aio_suicide_timeout + bdev_debug_aio_log_age + bdev_nvme_unbind_from_kernel + bdev_nvme_retry_count + bdev_enable_discard + bdev_async_discard + bluefs_alloc_size + bluefs_shared_alloc_size + bluefs_max_prefetch + bluefs_min_log_runway + bluefs_max_log_runway + bluefs_log_compact_min_ratio + bluefs_log_compact_min_size + bluefs_min_flush_size + bluefs_compact_log_sync + bluefs_buffered_io + bluefs_sync_write + bluefs_allocator + bluefs_preextend_wal_files + bluestore_bluefs + bluestore_bluefs_env_mirror + bluestore_bluefs_min + bluestore_bluefs_min_free + bluestore_bluefs_min_ratio + bluestore_bluefs_max_ratio + bluestore_bluefs_gift_ratio + bluestore_bluefs_reclaim_ratio + bluestore_bluefs_balance_interval + bluestore_bluefs_alloc_failure_dump_interval + bluestore_bluefs_db_compatibility + bluestore_spdk_mem + bluestore_spdk_coremask + bluestore_spdk_max_io_completion + bluestore_spdk_io_sleep + bluestore_block_path + bluestore_block_size + bluestore_block_create + bluestore_block_db_path + bluestore_block_db_size + bluestore_block_db_create + bluestore_block_wal_path + bluestore_block_wal_size + bluestore_block_wal_create + bluestore_block_preallocate_file + bluestore_ignore_data_csum + bluestore_csum_type + bluestore_retry_disk_reads + bluestore_min_alloc_size + 
bluestore_min_alloc_size_hdd + bluestore_min_alloc_size_ssd + bluestore_max_alloc_size + bluestore_prefer_deferred_size + bluestore_prefer_deferred_size_hdd + bluestore_prefer_deferred_size_ssd + bluestore_compression_mode + bluestore_compression_algorithm + bluestore_compression_min_blob_size + bluestore_compression_min_blob_size_hdd + bluestore_compression_min_blob_size_ssd + bluestore_compression_max_blob_size + bluestore_compression_max_blob_size_hdd + bluestore_compression_max_blob_size_ssd + bluestore_gc_enable_blob_threshold + bluestore_gc_enable_total_threshold + bluestore_max_blob_size + bluestore_max_blob_size_hdd + bluestore_max_blob_size_ssd + bluestore_compression_required_ratio + bluestore_extent_map_shard_max_size + bluestore_extent_map_shard_target_size + bluestore_extent_map_shard_min_size + bluestore_extent_map_shard_target_size_slop + bluestore_extent_map_inline_shard_prealloc_size + bluestore_cache_trim_interval + bluestore_cache_trim_max_skip_pinned + bluestore_cache_type + bluestore_2q_cache_kin_ratio + bluestore_2q_cache_kout_ratio + bluestore_cache_size + bluestore_cache_size_hdd + bluestore_cache_size_ssd + bluestore_cache_meta_ratio + bluestore_cache_kv_ratio + bluestore_cache_autotune + bluestore_cache_autotune_interval + bluestore_kvbackend + bluestore_allocator + bluestore_freelist_blocks_per_key + bluestore_bitmapallocator_blocks_per_zone + bluestore_bitmapallocator_span_size + bluestore_max_deferred_txc + bluestore_rocksdb_options + bluestore_rocksdb_cf + bluestore_rocksdb_cfs + bluestore_fsck_on_mount + bluestore_fsck_on_mount_deep + bluestore_fsck_quick_fix_on_mount + bluestore_fsck_on_umount + bluestore_fsck_on_umount_deep + bluestore_fsck_on_mkfs + bluestore_fsck_on_mkfs_deep + bluestore_sync_submit_transaction + bluestore_fsck_read_bytes_cap + bluestore_fsck_quick_fix_threads + bluestore_throttle_bytes + bluestore_throttle_deferred_bytes + bluestore_throttle_cost_per_io + bluestore_throttle_cost_per_io_hdd + 
bluestore_throttle_cost_per_io_ssd + bluestore_deferred_batch_ops + bluestore_deferred_batch_ops_hdd + bluestore_deferred_batch_ops_ssd + bluestore_nid_prealloc + bluestore_blobid_prealloc + bluestore_clone_cow + bluestore_default_buffered_read + bluestore_default_buffered_write + bluestore_debug_misc + bluestore_debug_no_reuse_blocks + bluestore_debug_small_allocations + bluestore_debug_freelist + bluestore_debug_prefill + bluestore_debug_prefragment_max + bluestore_debug_inject_read_err + bluestore_debug_randomize_serial_transaction + bluestore_debug_omit_block_device_write + bluestore_debug_fsck_abort + bluestore_debug_omit_kv_commit + bluestore_debug_permit_any_bdev_label + bluestore_debug_random_read_err + bluestore_debug_inject_bug21040 + bluestore_debug_inject_csum_err_probability + bluestore_fsck_error_on_no_per_pool_stats + bluestore_warn_on_bluefs_spillover + bluestore_warn_on_legacy_statfs + bluestore_log_op_age + bluestore_log_omap_iterator_age + bluestore_log_collection_list_age + kstore_max_ops + kstore_max_bytes + kstore_backend + kstore_rocksdb_options + kstore_fsck_on_mount + kstore_fsck_on_mount_deep + kstore_nid_prealloc + kstore_sync_transaction + kstore_sync_submit_transaction + kstore_onode_map_size + kstore_default_stripe_size + filestore_rocksdb_options + filestore_omap_backend + filestore_omap_backend_path + filestore_wbthrottle_enable + filestore_wbthrottle_btrfs_bytes_start_flusher + filestore_wbthrottle_btrfs_bytes_hard_limit + filestore_wbthrottle_btrfs_ios_start_flusher + filestore_wbthrottle_btrfs_ios_hard_limit + filestore_wbthrottle_btrfs_inodes_start_flusher + filestore_wbthrottle_xfs_bytes_start_flusher + filestore_wbthrottle_xfs_bytes_hard_limit + filestore_wbthrottle_xfs_ios_start_flusher + filestore_wbthrottle_xfs_ios_hard_limit + filestore_wbthrottle_xfs_inodes_start_flusher + filestore_wbthrottle_btrfs_inodes_hard_limit + filestore_wbthrottle_xfs_inodes_hard_limit + filestore_odsync_write + filestore_index_retry_probability + 
filestore_debug_inject_read_err + filestore_debug_random_read_err + filestore_debug_omap_check + filestore_omap_header_cache_size + filestore_max_inline_xattr_size + filestore_max_inline_xattr_size_xfs + filestore_max_inline_xattr_size_btrfs + filestore_max_inline_xattr_size_other + filestore_max_inline_xattrs + filestore_max_inline_xattrs_xfs + filestore_max_inline_xattrs_btrfs + filestore_max_inline_xattrs_other + filestore_max_xattr_value_size + filestore_max_xattr_value_size_xfs + filestore_max_xattr_value_size_btrfs + filestore_max_xattr_value_size_other + filestore_sloppy_crc + filestore_sloppy_crc_block_size + filestore_max_alloc_hint_size + filestore_max_sync_interval + filestore_min_sync_interval + filestore_btrfs_snap + filestore_btrfs_clone_range + filestore_zfs_snap + filestore_fsync_flushes_journal_data + filestore_fiemap + filestore_punch_hole + filestore_seek_data_hole + filestore_splice + filestore_fadvise + filestore_collect_device_partition_information + filestore_xfs_extsize + filestore_journal_parallel + filestore_journal_writeahead + filestore_journal_trailing + filestore_queue_max_ops + filestore_queue_max_bytes + filestore_caller_concurrency + filestore_expected_throughput_bytes + filestore_expected_throughput_ops + filestore_queue_max_delay_multiple + filestore_queue_high_delay_multiple + filestore_queue_max_delay_multiple_bytes + filestore_queue_high_delay_multiple_bytes + filestore_queue_max_delay_multiple_ops + filestore_queue_high_delay_multiple_ops + filestore_queue_low_threshhold + filestore_queue_high_threshhold + filestore_op_threads + filestore_op_thread_timeout + filestore_op_thread_suicide_timeout + filestore_commit_timeout + filestore_fiemap_threshold + filestore_merge_threshold + filestore_split_multiple + filestore_split_rand_factor + filestore_update_to + filestore_blackhole + filestore_fd_cache_size + filestore_fd_cache_shards + filestore_ondisk_finisher_threads + filestore_apply_finisher_threads + filestore_dump_file + 
filestore_kill_at + filestore_inject_stall + filestore_fail_eio + filestore_debug_verify_split + journal_dio + journal_aio + journal_force_aio + journal_block_size + journal_block_align + journal_write_header_frequency + journal_max_write_bytes + journal_max_write_entries + journal_throttle_low_threshhold + journal_throttle_high_threshhold + journal_throttle_high_multiple + journal_throttle_max_multiple + journal_align_min_size + journal_replay_from + mgr_stats_threshold + journal_zero_on_create + journal_ignore_corruption + journal_discard + fio_dir + rados_mon_op_timeout + rados_osd_op_timeout + rados_tracing + nss_db_path + mgr_module_path + mgr_initial_modules + mgr_data + mgr_tick_period + mgr_stats_period + mgr_client_bytes + mgr_client_messages + mgr_osd_bytes + mgr_osd_messages + mgr_mds_bytes + mgr_mds_messages + mgr_mon_bytes + mgr_mon_messages + mgr_connect_retry_interval + mgr_service_beacon_grace + mgr_client_service_daemon_unregister_timeout + mgr_debug_aggressive_pg_num_changes + mon_mgr_digest_period + mon_mgr_beacon_grace + mon_mgr_inactive_grace + mon_mgr_mkfs_grace + throttler_perf_counter + event_tracing + debug_deliberately_leak_memory + debug_asserts_on_shutdown + debug_asok_assert_abort + target_max_misplaced_ratio + device_failure_prediction_mode + gss_ktab_client_file + gss_target_name + debug_disable_randomized_ping + debug_heartbeat_testing_span + rgw_acl_grants_max_num + rgw_cors_rules_max_num + rgw_delete_multi_obj_max_num + rgw_website_routing_rules_max_num + rgw_rados_tracing + rgw_op_tracing + rgw_max_chunk_size + rgw_put_obj_min_window_size + rgw_put_obj_max_window_size + rgw_max_put_size + rgw_max_put_param_size + rgw_max_attr_size + rgw_max_attr_name_len + rgw_max_attrs_num_in_req + rgw_override_bucket_index_max_shards + rgw_bucket_index_max_aio + rgw_enable_quota_threads + rgw_enable_gc_threads + rgw_enable_lc_threads + rgw_data + rgw_enable_apis + rgw_cache_enabled + rgw_cache_lru_size + rgw_socket_path + rgw_host + rgw_port + 
rgw_dns_name + rgw_dns_s3website_name + rgw_service_provider_name + rgw_content_length_compat + rgw_relaxed_region_enforcement + rgw_lifecycle_work_time + rgw_lc_lock_max_time + rgw_lc_thread_delay + rgw_lc_max_objs + rgw_lc_max_rules + rgw_lc_debug_interval + rgw_mp_lock_max_time + rgw_script_uri + rgw_request_uri + rgw_ignore_get_invalid_range + rgw_swift_url + rgw_swift_url_prefix + rgw_swift_auth_url + rgw_swift_auth_entry + rgw_swift_tenant_name + rgw_swift_account_in_url + rgw_swift_enforce_content_length + rgw_keystone_url + rgw_keystone_admin_token + rgw_keystone_admin_token_path + rgw_keystone_admin_user + rgw_keystone_admin_password + rgw_keystone_admin_password_path + rgw_keystone_admin_tenant + rgw_keystone_admin_project + rgw_keystone_admin_domain + rgw_keystone_barbican_user + rgw_keystone_barbican_password + rgw_keystone_barbican_tenant + rgw_keystone_barbican_project + rgw_keystone_barbican_domain + rgw_keystone_api_version + rgw_keystone_accepted_roles + rgw_keystone_accepted_admin_roles + rgw_keystone_token_cache_size + rgw_keystone_revocation_interval + rgw_keystone_verify_ssl + rgw_keystone_implicit_tenants + rgw_cross_domain_policy + rgw_healthcheck_disabling_path + rgw_s3_auth_use_rados + rgw_s3_auth_use_keystone + rgw_s3_auth_order + rgw_barbican_url + rgw_ldap_uri + rgw_ldap_binddn + rgw_ldap_searchdn + rgw_ldap_dnattr + rgw_ldap_secret + rgw_s3_auth_use_ldap + rgw_ldap_searchfilter + rgw_opa_url + rgw_opa_token + rgw_opa_verify_ssl + rgw_use_opa_authz + rgw_admin_entry + rgw_enforce_swift_acls + rgw_swift_token_expiration + rgw_print_continue + rgw_print_prohibited_content_length + rgw_remote_addr_param + rgw_op_thread_timeout + rgw_op_thread_suicide_timeout + rgw_thread_pool_size + rgw_num_control_oids + rgw_num_rados_handles + rgw_verify_ssl + rgw_nfs_lru_lanes + rgw_nfs_lru_lane_hiwat + rgw_nfs_fhcache_partitions + rgw_nfs_fhcache_size + rgw_nfs_namespace_expire_secs + rgw_nfs_max_gc + rgw_nfs_write_completion_interval_s + 
rgw_nfs_s3_fast_attrs + rgw_rados_pool_autoscale_bias + rgw_rados_pool_pg_num_min + rgw_zone + rgw_zone_root_pool + rgw_default_zone_info_oid + rgw_region + rgw_region_root_pool + rgw_default_region_info_oid + rgw_zonegroup + rgw_zonegroup_root_pool + rgw_default_zonegroup_info_oid + rgw_realm + rgw_realm_root_pool + rgw_default_realm_info_oid + rgw_period_root_pool + rgw_period_latest_epoch_info_oid + rgw_log_nonexistent_bucket + rgw_log_object_name + rgw_log_object_name_utc + rgw_usage_max_shards + rgw_usage_max_user_shards + rgw_enable_ops_log + rgw_enable_usage_log + rgw_ops_log_rados + rgw_ops_log_socket_path + rgw_ops_log_data_backlog + rgw_fcgi_socket_backlog + rgw_usage_log_flush_threshold + rgw_usage_log_tick_interval + rgw_init_timeout + rgw_mime_types_file + rgw_gc_max_objs + rgw_gc_obj_min_wait + rgw_gc_processor_max_time + rgw_gc_processor_period + rgw_gc_max_concurrent_io + rgw_gc_max_trim_chunk + rgw_s3_success_create_obj_status + rgw_resolve_cname + rgw_obj_stripe_size + rgw_extended_http_attrs + rgw_exit_timeout_secs + rgw_get_obj_window_size + rgw_get_obj_max_req_size + rgw_relaxed_s3_bucket_names + rgw_defer_to_bucket_acls + rgw_list_buckets_max_chunk + rgw_md_log_max_shards + rgw_curl_wait_timeout_ms + rgw_curl_low_speed_limit + rgw_curl_low_speed_time + rgw_copy_obj_progress + rgw_copy_obj_progress_every_bytes + rgw_obj_tombstone_cache_size + rgw_data_log_window + rgw_data_log_changes_size + rgw_data_log_num_shards + rgw_data_log_obj_prefix + rgw_bucket_quota_ttl + rgw_bucket_quota_soft_threshold + rgw_bucket_quota_cache_size + rgw_bucket_default_quota_max_objects + rgw_bucket_default_quota_max_size + rgw_expose_bucket + rgw_frontends + rgw_user_quota_bucket_sync_interval + rgw_user_quota_sync_interval + rgw_user_quota_sync_idle_users + rgw_user_quota_sync_wait_time + rgw_user_default_quota_max_objects + rgw_user_default_quota_max_size + rgw_multipart_min_part_size + rgw_multipart_part_upload_limit + rgw_max_slo_entries + 
rgw_olh_pending_timeout_sec + rgw_user_max_buckets + rgw_objexp_gc_interval + rgw_objexp_hints_num_shards + rgw_objexp_chunk_size + rgw_enable_static_website + rgw_user_unique_email + rgw_log_http_headers + rgw_num_async_rados_threads + rgw_md_notify_interval_msec + rgw_run_sync_thread + rgw_sync_lease_period + rgw_sync_log_trim_interval + rgw_sync_log_trim_max_buckets + rgw_sync_log_trim_min_cold_buckets + rgw_sync_log_trim_concurrent_buckets + rgw_sync_data_inject_err_probability + rgw_sync_meta_inject_err_probability + rgw_sync_trace_history_size + rgw_sync_trace_per_node_log_size + rgw_sync_trace_servicemap_update_interval + rgw_period_push_interval + rgw_period_push_interval_max + rgw_safe_max_objects_per_shard + rgw_shard_warning_threshold + rgw_swift_versioning_enabled + rgw_swift_custom_header + rgw_swift_need_stats + rgw_reshard_num_logs + rgw_reshard_bucket_lock_duration + rgw_reshard_batch_size + rgw_reshard_max_aio + rgw_trust_forwarded_https + rgw_crypt_require_ssl + rgw_crypt_default_encryption_key + rgw_crypt_s3_kms_encryption_keys + rgw_crypt_suppress_logs + rgw_list_bucket_min_readahead + rgw_rest_getusage_op_compat + rgw_torrent_flag + rgw_torrent_tracker + rgw_torrent_createby + rgw_torrent_comment + rgw_torrent_encoding + rgw_data_notify_interval_msec + rgw_torrent_origin + rgw_torrent_sha_unit + rgw_dynamic_resharding + rgw_max_objs_per_shard + rgw_reshard_thread_interval + rgw_cache_expiry_interval + rgw_inject_notify_timeout_probability + rgw_max_notify_retries + rgw_sts_entry + rgw_sts_key + rgw_s3_auth_use_sts + rgw_sts_max_session_duration + rgw_max_listing_results + rgw_sts_token_introspection_url + rgw_sts_client_id + rgw_sts_client_secret + rgw_max_concurrent_requests + rgw_scheduler_type + rgw_dmclock_admin_res + rgw_dmclock_admin_wgt + rgw_dmclock_admin_lim + rgw_dmclock_auth_res + rgw_dmclock_auth_wgt + rgw_dmclock_auth_lim + rgw_dmclock_data_res + rgw_dmclock_data_wgt + rgw_dmclock_data_lim + rgw_dmclock_metadata_res + 
rgw_dmclock_metadata_wgt + rgw_dmclock_metadata_lim + rbd_default_pool + rbd_default_data_pool + rbd_default_features + rbd_op_threads + rbd_op_thread_timeout + rbd_non_blocking_aio + rbd_cache + rbd_cache_writethrough_until_flush + rbd_cache_size + rbd_cache_max_dirty + rbd_cache_target_dirty + rbd_cache_max_dirty_age + rbd_cache_max_dirty_object + rbd_cache_block_writes_upfront + rbd_concurrent_management_ops + rbd_balance_snap_reads + rbd_localize_snap_reads + rbd_balance_parent_reads + rbd_localize_parent_reads + rbd_sparse_read_threshold_bytes + rbd_readahead_trigger_requests + rbd_readahead_max_bytes + rbd_readahead_disable_after_bytes + rbd_clone_copy_on_read + rbd_blacklist_on_break_lock + rbd_blacklist_expire_seconds + rbd_request_timed_out_seconds + rbd_skip_partial_discard + rbd_discard_granularity_bytes + rbd_enable_alloc_hint + rbd_tracing + rbd_blkin_trace_all + rbd_validate_pool + rbd_validate_names + rbd_auto_exclusive_lock_until_manual_request + rbd_move_to_trash_on_remove + rbd_move_to_trash_on_remove_expire_seconds + rbd_mirroring_resync_after_disconnect + rbd_mirroring_delete_delay + rbd_mirroring_replay_delay + rbd_default_format + rbd_default_order + rbd_default_stripe_count + rbd_default_stripe_unit + rbd_default_map_options + rbd_default_clone_format + rbd_journal_order + rbd_journal_splay_width + rbd_journal_commit_age + rbd_journal_object_writethrough_until_flush + rbd_journal_object_flush_interval + rbd_journal_object_flush_bytes + rbd_journal_object_flush_age + rbd_journal_object_max_in_flight_appends + rbd_journal_pool + rbd_journal_max_payload_bytes + rbd_journal_max_concurrent_object_sets + rbd_qos_iops_limit + rbd_qos_bps_limit + rbd_qos_read_iops_limit + rbd_qos_write_iops_limit + rbd_qos_read_bps_limit + rbd_qos_write_bps_limit + rbd_qos_iops_burst + rbd_qos_bps_burst + rbd_qos_read_iops_burst + rbd_qos_write_iops_burst + rbd_qos_read_bps_burst + rbd_qos_write_bps_burst + rbd_qos_schedule_tick_min + rbd_discard_on_zeroed_write_same 
+ rbd_mtime_update_interval + rbd_atime_update_interval + rbd_mirror_journal_commit_age + rbd_mirror_journal_poll_age + rbd_mirror_journal_max_fetch_bytes + rbd_mirror_sync_point_update_age + rbd_mirror_concurrent_image_syncs + rbd_mirror_pool_replayers_refresh_interval + rbd_mirror_concurrent_image_deletions + rbd_mirror_delete_retry_interval + rbd_mirror_image_state_check_interval + rbd_mirror_leader_heartbeat_interval + rbd_mirror_leader_max_missed_heartbeats + rbd_mirror_leader_max_acquire_attempts_before_break + rbd_mirror_image_policy_type + rbd_mirror_image_policy_migration_throttle + rbd_mirror_image_policy_update_throttle_interval + rbd_mirror_image_policy_rebalance_timeout + rbd_mirror_perf_stats_prio + mds_data + mds_max_xattr_pairs_size + mds_cache_trim_interval + mds_cache_size + mds_cache_memory_limit + mds_cache_reservation + mds_health_cache_threshold + mds_cache_mid + mds_cache_trim_decay_rate + mds_cache_trim_threshold + mds_max_file_recover + mds_dir_max_commit_size + mds_dir_keys_per_op + mds_decay_halflife + mds_beacon_interval + mds_beacon_grace + mds_heartbeat_grace + mds_enforce_unique_name + mds_session_blacklist_on_timeout + mds_session_blacklist_on_evict + mds_sessionmap_keys_per_op + mds_recall_max_caps + mds_recall_max_decay_rate + mds_recall_max_decay_threshold + mds_recall_global_max_decay_threshold + mds_recall_warning_threshold + mds_recall_warning_decay_rate + mds_freeze_tree_timeout + mds_health_summarize_threshold + mds_reconnect_timeout + mds_tick_interval + mds_dirstat_min_interval + mds_scatter_nudge_interval + mds_client_prealloc_inos + mds_early_reply + mds_default_dir_hash + mds_log_pause + mds_log_skip_corrupt_events + mds_log_max_events + mds_log_events_per_segment + mds_log_segment_size + mds_log_max_segments + mds_bal_export_pin + mds_bal_sample_interval + mds_bal_replicate_threshold + mds_bal_unreplicate_threshold + mds_bal_split_size + mds_bal_split_rd + mds_bal_split_wr + mds_bal_split_bits + mds_bal_merge_size + 
mds_bal_interval + mds_bal_fragment_interval + mds_bal_fragment_size_max + mds_bal_fragment_fast_factor + mds_bal_fragment_dirs + mds_bal_idle_threshold + mds_bal_max + mds_bal_max_until + mds_bal_mode + mds_bal_min_rebalance + mds_bal_min_start + mds_bal_need_min + mds_bal_need_max + mds_bal_midchunk + mds_bal_minchunk + mds_bal_target_decay + mds_replay_interval + mds_shutdown_check + mds_thrash_exports + mds_thrash_fragments + mds_dump_cache_on_map + mds_dump_cache_after_rejoin + mds_verify_scatter + mds_debug_scatterstat + mds_debug_frag + mds_debug_auth_pins + mds_debug_subtrees + mds_kill_mdstable_at + mds_max_export_size + mds_kill_export_at + mds_kill_import_at + mds_kill_link_at + mds_kill_rename_at + mds_kill_openc_at + mds_kill_journal_at + mds_kill_journal_expire_at + mds_kill_journal_replay_at + mds_journal_format + mds_kill_create_at + mds_inject_traceless_reply_probability + mds_wipe_sessions + mds_wipe_ino_prealloc + mds_skip_ino + mds_enable_op_tracker + mds_op_history_size + mds_op_history_duration + mds_op_complaint_time + mds_op_log_threshold + mds_snap_min_uid + mds_snap_max_uid + mds_snap_rstat + mds_verify_backtrace + mds_max_completed_flushes + mds_max_completed_requests + mds_action_on_write_error + mds_mon_shutdown_timeout + mds_max_purge_files + mds_max_purge_ops + mds_max_purge_ops_per_pg + mds_purge_queue_busy_flush_period + mds_root_ino_uid + mds_root_ino_gid + mds_max_scrub_ops_in_progress + mds_damage_table_max_entries + mds_client_writeable_range_max_inc_objs + mds_min_caps_per_client + mds_max_caps_per_client + mds_hack_allow_loading_invalid_metadata + mds_defer_session_stale + mds_inject_migrator_session_race + mds_request_load_average_decay_rate + mds_cap_revoke_eviction_timeout + mds_max_retries_on_remount_failure + mds_dump_cache_threshold_formatter + mds_dump_cache_threshold_file + client_cache_size + client_cache_mid + client_use_random_mds + client_mount_timeout + client_tick_interval + client_trace + client_readahead_min + 
client_readahead_max_bytes + client_readahead_max_periods + client_reconnect_stale + client_snapdir + client_mountpoint + client_mount_uid + client_mount_gid + client_notify_timeout + osd_client_watch_timeout + client_caps_release_delay + client_quota_df + client_oc + client_oc_size + client_oc_max_dirty + client_oc_target_dirty + client_oc_max_dirty_age + client_oc_max_objects + client_debug_getattr_caps + client_debug_force_sync_read + client_debug_inject_tick_delay + client_max_inline_size + client_inject_release_failure + client_inject_fixed_oldest_tid + client_metadata + client_acl_type + client_permissions + client_dirsize_rbytes + client_force_lazyio + fuse_use_invalidate_cb + fuse_disable_pagecache + fuse_allow_other + fuse_default_permissions + fuse_big_writes + fuse_max_write + fuse_atomic_o_trunc + fuse_debug + fuse_multithreaded + fuse_require_active_mds + fuse_syncfs_on_mksnap + fuse_set_user_groups + client_try_dentry_invalidate + client_die_on_failed_remount + client_die_on_failed_dentry_invalidate + client_check_pool_perm + client_use_faked_inos + client_mds_namespace + fake_statfs_for_testing + debug_allow_any_pool_priority + mgr/ansible/password + mgr/ansible/server_url + mgr/ansible/username + mgr/ansible/verify_server + mgr/balancer/active + mgr/balancer/begin_time + mgr/balancer/begin_weekday + mgr/balancer/crush_compat_max_iterations + mgr/balancer/crush_compat_metrics + mgr/balancer/crush_compat_step + mgr/balancer/end_time + mgr/balancer/end_weekday + mgr/balancer/min_score + mgr/balancer/mode + mgr/balancer/pool_ids + mgr/balancer/sleep_interval + mgr/balancer/upmap_max_deviation + mgr/balancer/upmap_max_iterations + mgr/crash/retain_interval + mgr/crash/warn_recent_interval + mgr/dashboard/ALERTMANAGER_API_HOST + mgr/dashboard/AUDIT_API_ENABLED + mgr/dashboard/AUDIT_API_LOG_PAYLOAD + mgr/dashboard/ENABLE_BROWSABLE_API + mgr/dashboard/FEATURE_TOGGLE_cephfs + mgr/dashboard/FEATURE_TOGGLE_iscsi + mgr/dashboard/FEATURE_TOGGLE_mirroring + 
mgr/dashboard/FEATURE_TOGGLE_rbd + mgr/dashboard/FEATURE_TOGGLE_rgw + mgr/dashboard/GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE + mgr/dashboard/GRAFANA_API_PASSWORD + mgr/dashboard/GRAFANA_API_URL + mgr/dashboard/GRAFANA_API_USERNAME + mgr/dashboard/GRAFANA_UPDATE_DASHBOARDS + mgr/dashboard/ISCSI_API_SSL_VERIFICATION + mgr/dashboard/PROMETHEUS_API_HOST + mgr/dashboard/REST_REQUESTS_TIMEOUT + mgr/dashboard/RGW_API_ACCESS_KEY + mgr/dashboard/RGW_API_ADMIN_RESOURCE + mgr/dashboard/RGW_API_HOST + mgr/dashboard/RGW_API_PORT + mgr/dashboard/RGW_API_SCHEME + mgr/dashboard/RGW_API_SECRET_KEY + mgr/dashboard/RGW_API_SSL_VERIFY + mgr/dashboard/RGW_API_USER_ID + mgr/dashboard/crt_file + mgr/dashboard/jwt_token_ttl + mgr/dashboard/key_file + mgr/dashboard/password + mgr/dashboard/server_addr + mgr/dashboard/server_port + mgr/dashboard/ssl + mgr/dashboard/ssl_server_port + mgr/dashboard/standby_behaviour + mgr/dashboard/standby_error_status_code + mgr/dashboard/url_prefix + mgr/dashboard/username + mgr/deepsea/salt_api_eauth + mgr/deepsea/salt_api_password + mgr/deepsea/salt_api_url + mgr/deepsea/salt_api_username + mgr/devicehealth/enable_monitoring + mgr/devicehealth/mark_out_threshold + mgr/devicehealth/pool_name + mgr/devicehealth/retention_period + mgr/devicehealth/scrape_frequency + mgr/devicehealth/self_heal + mgr/devicehealth/sleep_interval + mgr/devicehealth/warn_threshold + mgr/diskprediction_local/predict_interval + mgr/diskprediction_local/sleep_interval + mgr/influx/batch_size + mgr/influx/database + mgr/influx/hostname + mgr/influx/interval + mgr/influx/password + mgr/influx/port + mgr/influx/ssl + mgr/influx/threads + mgr/influx/username + mgr/influx/verify_ssl + mgr/localpool/failure_domain + mgr/localpool/min_size + mgr/localpool/num_rep + mgr/localpool/pg_num + mgr/localpool/prefix + mgr/localpool/subtree + mgr/orchestrator_cli/orchestrator + mgr/pg_autoscaler/sleep_interval + mgr/progress/max_completed_events + mgr/progress/persist_interval + 
mgr/prometheus/rbd_stats_pools + mgr/prometheus/rbd_stats_pools_refresh_interval + mgr/prometheus/scrape_interval + mgr/prometheus/server_addr + mgr/prometheus/server_port + mgr/restful/key_file + mgr/restful/server_addr + mgr/restful/server_port + mgr/selftest/roption1 + mgr/selftest/roption2 + mgr/selftest/rwoption1 + mgr/selftest/rwoption2 + mgr/selftest/rwoption3 + mgr/selftest/rwoption4 + mgr/selftest/rwoption5 + mgr/selftest/rwoption6 + mgr/selftest/testkey + mgr/selftest/testlkey + mgr/selftest/testnewline + mgr/ssh/inventory_cache_timeout_min + mgr/ssh/ssh_config_file + mgr/telegraf/address + mgr/telegraf/interval + mgr/telemetry/channel_basic + mgr/telemetry/channel_crash + mgr/telemetry/channel_device + mgr/telemetry/channel_ident + mgr/telemetry/contact + mgr/telemetry/description + mgr/telemetry/device_url + mgr/telemetry/enabled + mgr/telemetry/interval + mgr/telemetry/last_opt_revision + mgr/telemetry/leaderboard + mgr/telemetry/organization + mgr/telemetry/proxy + mgr/telemetry/url + mgr/zabbix/identifier + mgr/zabbix/interval + mgr/zabbix/zabbix_host + mgr/zabbix/zabbix_port + mgr/zabbix/zabbix_sender + #+end_EXAMPLE + +** ceph config-key ls + #+name: ceph config-key ls + #+call: rook-tool("ceph config-key ls") + + #+RESULTS: ceph config-key ls + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + [ + "config-history/1/", + "config-history/2/", + "config-history/2/+mon_allow_pool_delete", + "config-history/3/", + "config-history/3/+rbd_default_features", + "config-history/4/", + "config-history/4/+mgr/mgr/orchestrator_cli/orchestrator", + "config-history/5/", + "config-history/5/+mgr.a/mgr/dashboard/ssl", + "config-history/6/", + "config-history/6/+mgr.a/mgr/dashboard/server_port", + "config/mgr.a/mgr/dashboard/server_port", + "config/mgr.a/mgr/dashboard/ssl", + "config/mgr/mgr/orchestrator_cli/orchestrator", + "config/mon_allow_pool_delete", + "config/rbd_default_features", + "mgr/dashboard/accessdb_v1", + 
"mgr/dashboard/jwt_secret" + ] + #+end_EXAMPLE + +** ceph fs ls + #+name: ceph fs ls + #+call: rook-tool("ceph fs ls") + + #+RESULTS: ceph fs ls + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + name: iifs, metadata pool: iifs-metadata, data pools: [iifs-data0 ] + #+end_EXAMPLE + +** ceph fs volume ls + Trying to understand the cephfs status. +It appears to be hung, because ceph fs ls doesn't return. + +https://docs.ceph.com/docs/master/cephfs/ + + + #+name: ceph fs volume ls + #+call: rook-tool("ceph fs volume ls") + +** rados commands + #+name: rados commands + #+begin_src shell + kubectl -n rook-ceph exec -it \ + $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') \ + -- rados -h | grep -i list + #+end_src + + #+RESULTS: rados commands + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + lspools list pools + ls list objects in pool + lssnap list snaps + listxattr + listsnaps list the snapshots of this object + listomapkeys list the keys in the object map + listomapvals list the keys and vals in the object map + listwatchers list the watchers of this object + lock list + List all advisory locks on an object + list-inconsistent-pg list inconsistent PGs in given pool + list-inconsistent-obj list inconsistent objects in given PG + list-inconsistent-snapset list inconsistent snapsets in the given PG + Use with ls to list objects in all namespaces + Use with ls to list objects in default namespace + #+end_EXAMPLE + +** rados lspools + Trying to understand the cephfs status. +It appears to be hung, because ceph fs ls doesn't return. 
+ +https://docs.ceph.com/docs/master/cephfs/ + + #+name: rados lspools + #+call: rook-tool("rados lspools") + + #+RESULTS: rados lspools + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + ii-block-pool + iifs-metadata + iifs-data0 + #+end_EXAMPLE + +** rados ls + Trying to understand the cephfs status. +It appears to be hung, because ceph fs ls doesn't return. + +https://docs.ceph.com/docs/master/cephfs/ + + #+name: rados ls + #+begin_src shell :prologue "kubectl -n rook-ceph exec -i `kubectl -n rook-ceph get pod -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}' ` bash" :epilogue "" + rados ls -p iifs-metadata + #+end_src + + #+RESULTS: rados ls + #+begin_EXAMPLE + 601.00000000 + 602.00000000 + 600.00000000 + 603.00000000 + 1.00000000.inode + 200.00000000 + 200.00000001 + 606.00000000 + 607.00000000 + mds0_openfiles.0 + 608.00000000 + 604.00000000 + 500.00000000 + mds_snaptable + 605.00000000 + mds0_inotable + 100.00000000 + mds0_sessionmap + 609.00000000 + 400.00000000 + 100.00000000.inode + 1.00000000 + #+end_EXAMPLE + + #+call: rook-tool("-- rados ls -p iifs-data0") + + + +** ceph status + #+name: ceph status + #+call: rook-tool("ceph status") + #+RESULTS: ceph status + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + cluster: + id: 000bd0c3-d911-4dc7-8d72-f3024e714115 + health: HEALTH_WARN + OSD count 1 < osd_pool_default_size 3 + too few PGs per OSD (24 < min 30) + + services: + mon: 1 daemons, quorum a (age 3d) + mgr: a(active, since 3d) + mds: iifs:1 {0=iifs-a=up:active} 1 up:standby-replay + osd: 1 osds: 1 up (since 3d), 1 in (since 3d) + + data: + pools: 3 pools, 24 pgs + objects: 40 objects, 11 MiB + usage: 1.0 GiB used, 29 GiB / 30 GiB avail + pgs: 24 active+clean + + #+end_EXAMPLE +** ceph df + #+call: rook-tool[:wrap "src TEXT" :dir "/tmp"]("ceph df") + + #+RESULTS: + #+begin_src TEXT + Unable to use a TTY - input is not a terminal or the right kind of 
file + RAW STORAGE: + CLASS SIZE AVAIL USED RAW USED %RAW USED + hdd 30 GiB 29 GiB 7.1 MiB 1.0 GiB 3.36 + TOTAL 30 GiB 29 GiB 7.1 MiB 1.0 GiB 3.36 + + POOLS: + POOL ID STORED OBJECTS USED %USED MAX AVAIL + ii-block-pool 1 872 KiB 15 1.5 MiB 0 27 GiB + iifs-metadata 2 2.2 KiB 25 512 KiB 0 27 GiB + iifs-data0 3 0 B 0 0 B 0 27 GiB + #+end_src + +** ceph pg dump + #+call: rook-tool("ceph pg dump") + #+RESULTS: + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + dumped all + version 35893 + stamp 2019-12-30 03:52:01.102189 + last_osdmap_epoch 0 + last_pg_scan 0 + PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES OMAP_BYTES* OMAP_KEYS* LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP SNAPTRIMQ_LEN + 1.7 0 0 0 0 0 0 0 0 0 0 active+clean 2019-12-29 08:01:34.039628 0'0 11:17 [0] 0 [0] 0 0'0 2019-12-29 08:01:21.070991 0'0 2019-12-29 08:01:21.070991 0 + 1.6 0 0 0 0 0 0 0 0 0 0 active+clean 2019-12-29 08:01:34.035377 0'0 11:17 [0] 0 [0] 0 0'0 2019-12-29 08:01:21.070991 0'0 2019-12-29 08:01:21.070991 0 + 1.5 0 0 0 0 0 0 0 0 0 0 active+clean 2019-12-29 08:01:34.040298 0'0 11:17 [0] 0 [0] 0 0'0 2019-12-29 08:01:21.070991 0'0 2019-12-29 08:01:21.070991 0 + 1.4 0 0 0 0 0 0 0 0 0 0 active+clean 2019-12-29 08:01:34.035829 0'0 11:17 [0] 0 [0] 0 0'0 2019-12-29 08:01:21.070991 0'0 2019-12-29 08:01:21.070991 0 + 1.0 0 0 0 0 0 0 0 0 0 0 active+clean 2019-12-29 08:01:34.040512 0'0 11:17 [0] 0 [0] 0 0'0 2019-12-29 08:01:21.070991 0'0 2019-12-29 08:01:21.070991 0 + 1.1 0 0 0 0 0 0 0 0 0 0 active+clean 2019-12-29 08:01:34.035688 0'0 11:17 [0] 0 [0] 0 0'0 2019-12-29 08:01:21.070991 0'0 2019-12-29 08:01:21.070991 0 + 1.2 0 0 0 0 0 0 0 0 0 0 active+clean 2019-12-29 08:01:34.039864 0'0 11:17 [0] 0 [0] 0 0'0 2019-12-29 08:01:21.070991 0'0 2019-12-29 08:01:21.070991 0 + 1.3 0 0 0 0 0 0 0 0 0 0 active+clean 2019-12-29 08:01:34.039064 0'0 11:17 [0] 0 
[0] 0 0'0 2019-12-29 08:01:21.070991 0'0 2019-12-29 08:01:21.070991 0 + + 1 0 0 0 0 0 0 0 0 0 0 + + sum 0 0 0 0 0 0 0 0 0 0 + OSD_STAT USED AVAIL USED_RAW TOTAL HB_PEERS PG_SUM PRIMARY_PG_SUM + 0 1.7 MiB 893 GiB 1.0 GiB 894 GiB [] 8 8 + sum 1.7 MiB 893 GiB 1.0 GiB 894 GiB + + ,* NOTE: Omap statistics are gathered during deep scrub and may be inaccurate soon afterwards depending on utilisation. See http://docs.ceph.com/docs/master/dev/placement-group/#omap-statistics for further details. + #+end_EXAMPLE +** ceph osd tree + #+call: rook-tool("ceph osd tree") + + #+RESULTS: + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF + -1 0.02930 root default + -3 0.02930 host ubuntu + 0 hdd 0.02930 osd.0 up 1.00000 1.00000 + #+end_EXAMPLE +** ceph osd stat + #+call: rook-tool("ceph osd stat") +** ceph mds stat + #+call: rook-tool("ceph mds stat") +** ceph mon stat + #+call: rook-tool("ceph mon stat") +** ceph osd lspools + #+call: rook-tool("ceph osd lspools") + + #+RESULTS: + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + 1 ii-block-pool + 2 iifs-metadata + 3 iifs-data0 + #+end_EXAMPLE + +** ceph rados df + #+call: rook-tool("rados df") + + #+RESULTS: + #+begin_EXAMPLE + Unable to use a TTY - input is not a terminal or the right kind of file + POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS RD WR_OPS WR USED COMPR UNDER COMPR + ii-block-pool 1.5 MiB 15 0 15 0 0 0 67033 64 MiB 100 1.2 MiB 0 B 0 B + iifs-data0 0 B 0 0 0 0 0 0 0 0 B 0 0 B 0 B 0 B + iifs-metadata 512 KiB 25 0 25 0 0 0 1 1 KiB 49 17 KiB 0 B 0 B + + total_objects 40 + total_used 1.0 GiB + total_avail 29 GiB + total_space 30 GiB + #+end_EXAMPLE + #+call: rook-tool("ceph osd get ii-block-pool") +** ceph auth list +Might want to ensure you don't save the results of this one + #+call: rook-tool[:results silent]("ceph auth list") + + + #+call: 
rook-tool("rdb ls") + + #+RESULTS: + #+begin_EXAMPLE + #+end_EXAMPLE + +* Get keys onto pi + #+begin_src shell :dir ~/ + scp ~/.ssh/id_rsa-4096-20090605-ccc.pub ubuntu@192.168.1.101:.ssh/authorized_keys + #+end_src + +* Update iptables etc to -legacy + #+begin_src shell :dir /ssh:ubuntu@192.168.1.101:/ + sudo update-alternatives --set iptables /usr/sbin/iptables-legacy + sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy + sudo update-alternatives --set arptables /usr/sbin/arptables-legacy + sudo update-alternatives --set ebtables /usr/sbin/ebtables-legacy + #+end_src + +* ensure cgroups for raspi + +We had an error regarding cgroups when trying to run kubeadm init. +Stephen noted this was the fix he's used on his pi's. + + #+begin_src shell + echo "cgroup_enable=memory cgroup_memory=1" | sudo tee -a /boot/firmware/nobtcmd.txt + #+end_src + + #+begin_src shell :results silent + sudo reboot + #+end_src + +* install docker +** install + #+begin_src shell :results silent + sudo apt-get install -y docker.io + #+end_src +** add ubuntu to docker group + #+begin_src shell :results silent + sudo adduser ubuntu docker + #+end_src +** check + #+begin_src shell + id + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + uid=1000(ubuntu) gid=1000(ubuntu) groups=1000(ubuntu),4(adm),20(dialout),24(cdrom),25(floppy),27(sudo),29(audio),30(dip),44(video),46(plugdev),114(netdev),117(lxd),118(docker) + #+end_EXAMPLE + +** docker ps check + + #+begin_src shell + docker ps + #+end_src + + #+RESULTS: + : CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +* Install kube-* + +** setup repos + #+begin_src shell + sudo apt-get update && sudo apt-get install -y apt-transport-https curl + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + cat <<-EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list + deb https://apt.kubernetes.io/ kubernetes-xenial main + EOF + sudo apt-get update + #+end_src +** install and don't upgrade packages + #+begin_src 
shell :results silent + sudo apt-get install -y kubeadm kubectl kubelet + sudo apt-mark hold kubelet kubeadm kubectl + #+end_src + +** Verify + #+begin_src shell + kubectl version + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + Client Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.0", GitCommit:"70132b0f130acc0bed193d9ba59dd186f0e634cf", GitTreeState:"clean", BuildDate:"2019-12-07T21:20:10Z", GoVersion:"go1.13.4", Compiler:"gc", Platform:"linux/arm64"} + Server Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.0", GitCommit:"70132b0f130acc0bed193d9ba59dd186f0e634cf", GitTreeState:"clean", BuildDate:"2019-12-07T21:12:17Z", GoVersion:"go1.13.4", Compiler:"gc", Platform:"linux/arm64"} + #+end_EXAMPLE + +Ensure that docker info shows no errors relating to cgroups. + + #+begin_src shell :results code + ( + docker info + ) 2>&1 + : + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + Client: + Debug Mode: false + + Server: + Containers: 20 + Running: 17 + Paused: 0 + Stopped: 3 + Images: 9 + Server Version: 19.03.2 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Native Overlay Diff: true + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog + Swarm: inactive + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: + runc version: + init version: + Security Options: + apparmor + seccomp + Profile: default + Kernel Version: 5.3.0-1014-raspi2 + Operating System: Ubuntu 19.10 + OSType: linux + Architecture: aarch64 + CPUs: 4 + Total Memory: 3.703GiB + Name: ubuntu + ID: 2W3G:EMYS:O363:SAS2:PLLY:ZLZL:WCGT:ZDM3:EBOR:NILT:Y2Y3:XPED + Docker Root Dir: /var/lib/docker + Debug Mode: false + Registry: https://index.docker.io/v1/ + Labels: + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Live Restore Enabled: false + + 
WARNING: No swap limit support + #+end_EXAMPLE +* Install kubernetes + #+begin_src shell + ip a show dev eth0 + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + 2: eth0: mtu 1500 qdisc mq state UP group default qlen 1000 + link/ether dc:a6:32:48:88:5f brd ff:ff:ff:ff:ff:ff + inet 192.168.1.101/24 brd 192.168.1.255 scope global dynamic eth0 + valid_lft 15988sec preferred_lft 15988sec + inet6 fe80::dea6:32ff:fe48:885f/64 scope link + valid_lft forever preferred_lft forever + #+end_EXAMPLE +** migrate old config + #+begin_src shell :async t + sudo kubeadm config migrate --old-config kubeadm-config.yaml --new-config kubeadm-config-new.yaml + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + W1228 01:52:42.588628 10899 validation.go:28] Cannot validate kube-proxy config - no validator is available + W1228 01:52:42.588818 10899 validation.go:28] Cannot validate kubelet config - no validator is available + #+end_EXAMPLE + + +** show that the config comes from the pi +#+begin_src shell :dir ~/ +kubectl config view +#+end_src + +#+RESULTS: +#+begin_EXAMPLE +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: DATA+OMITTED + server: https://192.168.1.101:6443 + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: REDACTED + client-key-data: REDACTED +#+end_EXAMPLE + +** note that coredns WILL NOT START until networking is happy + #+begin_src shell + kubectl get pods --all-namespaces + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + NAMESPACE NAME READY STATUS RESTARTS AGE + kube-system coredns-6955765f44-29kb9 1/1 Running 0 40s + kube-system coredns-6955765f44-hl925 1/1 Running 0 40s + kube-system etcd-ubuntu 1/1 Running 0 31s + kube-system kube-apiserver-ubuntu 1/1 Running 0 31s + kube-system kube-controller-manager-ubuntu 1/1 Running 0 31s + kube-system 
kube-proxy-lf66k 1/1 Running 0 40s + kube-system kube-scheduler-ubuntu 1/1 Running 0 31s + kube-system weave-net-nlskh 2/2 Running 0 20s + #+end_EXAMPLE +** Core DNS Starts! + #+begin_src shell :wrap "src json" + COREDNS_NODE=$(kubectl get pod --namespace=kube-system -l k8s-app=kube-dns -o name | head -1) + kubectl get $COREDNS_NODE --namespace=kube-system + #+end_src + + #+RESULTS: + #+begin_src json + NAME READY STATUS RESTARTS AGE + coredns-6955765f44-lfbm7 1/1 Running 0 98s + #+end_src + + #+begin_src shell :wrap "src json" + COREDNS_NODE=$(kubectl get pod --namespace=kube-system -l k8s-app=kube-dns -o name | head -1) + kubectl get $COREDNS_NODE --namespace=kube-system + #+end_src + + #+RESULTS: + #+begin_src json + NAME READY STATUS RESTARTS AGE + coredns-6955765f44-h7bjj 1/1 Running 0 5m1s + #+end_src + #+begin_src shell + free -m + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + total used free shared buff/cache available + Mem: 3791 953 451 4 2386 2863 + Swap: 0 0 0 + #+end_EXAMPLE +* locally run kubectl + :PROPERTIES: + :header-args:shell+: :dir ~/ + :END: +** kubectl deploy some stuff + #+begin_src shell + kubectl version + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + Client Version: version.Info{Major:"1", Minor:"16", GitVersion:"v1.16.3", GitCommit:"b3cbbae08ec52a7fc73d334838e18d17e8512749", GitTreeState:"clean", BuildDate:"2019-11-13T11:23:11Z", GoVersion:"go1.12.12", Compiler:"gc", Platform:"linux/amd64"} + Server Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.0", GitCommit:"70132b0f130acc0bed193d9ba59dd186f0e634cf", GitTreeState:"clean", BuildDate:"2019-12-07T21:12:17Z", GoVersion:"go1.13.4", Compiler:"gc", Platform:"linux/arm64"} + #+end_EXAMPLE + +* TODO kubectl apply -f http://iimacs.org +* setup pi.ii.nz +** get ip + #+name: pi_ip + #+begin_src shell :cache yes + curl icanhazip.com + #+end_src + + #+RESULTS[9df271cb6b4030541da56f2edf034902fe5ab69d]: pi_ip + #+begin_EXAMPLE + 103.26.16.167 + #+end_EXAMPLE + +** setup/check dns 
(dnsimple.com for now) + :PROPERTIES: + :header-args:shell+: :dir ~/ + :END: + #+begin_src shell + host pi.ii.nz + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + pi.ii.nz has address 103.26.16.167 + #+end_EXAMPLE + #+begin_src shell + host traefik.ii.nz + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + traefik.ii.nz is an alias for pi.ii.nz. + pi.ii.nz has address 103.26.16.167 + #+end_EXAMPLE + +* traefik +** install helm + #+begin_src shell + curl -s -L \ + https://get.helm.sh/helm-v3.0.2-linux-arm64.tar.gz \ + | sudo tar xvz -f - --strip-components 1 \ + -C /usr/local/bin linux-arm64/helm + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + linux-arm64/helm + #+end_EXAMPLE + +** check helm + #+begin_src shell + helm version + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + version.BuildInfo{Version:"v3.0.2", GitCommit:"19e47ee3283ae98139d98460de796c1be1e3975f", GitTreeState:"clean", GoVersion:"go1.13.5"} + #+end_EXAMPLE + +** TODO Setup org-babel block for htpasswd cli later. + For now http://www.htaccesstools.com/htpasswd-generator/ +** update helm repo to include default k8s stable + #+begin_src shell + helm repo add stable https://kubernetes-charts.storage.googleapis.com/ + helm repo update + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + "stable" has been added to your repositories + Hang tight while we grab the latest from your chart repositories... + ...Successfully got an update from the "stable" chart repository + Update Complete. 
⎈ Happy Helming!⎈ + #+end_EXAMPLE + +** configure and install + #+begin_src shell + helm uninstall traefiik + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + release "traefiik" uninstalled + #+end_EXAMPLE + + + #+begin_src shell + kubectl get svc --namespace traefik + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + ii-traefik LoadBalancer 10.96.86.160 80:57952/TCP,443:11583/TCP 82m + ii-traefik-dashboard ClusterIP 10.96.170.0 80/TCP 82m + #+end_EXAMPLE + #+begin_src shell + sudo ss -ltnp + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + State Recv-Q Send-Q Local Address:Port Peer Address:Port + LISTEN 0 128 0.0.0.0:22 0.0.0.0:* users:(("sshd",pid=1295,fd=3)) + LISTEN 0 128 192.168.1.101:9080 0.0.0.0:* users:(("cephcsi",pid=16941,fd=3)) + LISTEN 0 128 192.168.1.101:9081 0.0.0.0:* users:(("cephcsi",pid=16917,fd=5)) + LISTEN 0 128 192.168.1.101:11583 0.0.0.0:* users:(("kube-proxy",pid=10743,fd=11)) + LISTEN 0 128 192.168.1.101:57952 0.0.0.0:* users:(("kube-proxy",pid=10743,fd=8)) + LISTEN 0 128 127.0.0.1:6784 0.0.0.0:* users:(("weaver",pid=11546,fd=19)) + LISTEN 0 128 192.168.1.101:9090 0.0.0.0:* users:(("cephcsi",pid=16459,fd=6)) + LISTEN 0 128 192.168.1.101:9091 0.0.0.0:* users:(("cephcsi",pid=16442,fd=6)) + LISTEN 0 128 127.0.0.1:34631 0.0.0.0:* users:(("containerd",pid=1279,fd=8)) + LISTEN 0 128 127.0.0.1:10248 0.0.0.0:* users:(("kubelet",pid=10227,fd=28)) + LISTEN 0 128 127.0.0.1:10249 0.0.0.0:* users:(("kube-proxy",pid=10743,fd=15)) + LISTEN 0 128 192.168.1.101:2379 0.0.0.0:* users:(("etcd",pid=9682,fd=6)) + LISTEN 0 128 127.0.0.1:2379 0.0.0.0:* users:(("etcd",pid=9682,fd=5)) + LISTEN 0 128 192.168.1.101:2380 0.0.0.0:* users:(("etcd",pid=9682,fd=3)) + LISTEN 0 128 127.0.0.1:2381 0.0.0.0:* users:(("etcd",pid=9682,fd=11)) + LISTEN 0 128 127.0.0.1:35663 0.0.0.0:* users:(("kubelet",pid=10227,fd=14)) + LISTEN 0 128 127.0.0.1:10257 0.0.0.0:* users:(("kube-controller",pid=9700,fd=6)) + LISTEN 0 128 127.0.0.1:10259 0.0.0.0:* 
users:(("kube-scheduler",pid=9691,fd=6)) + LISTEN 0 128 127.0.0.53%lo:53 0.0.0.0:* users:(("systemd-resolve",pid=1179,fd=13)) + LISTEN 0 128 [::]:22 [::]:* users:(("sshd",pid=1295,fd=4)) + LISTEN 0 128 *:6781 *:* users:(("weave-npc",pid=11536,fd=10)) + LISTEN 0 128 *:6782 *:* users:(("weaver",pid=11546,fd=18)) + LISTEN 0 128 *:6783 *:* users:(("weaver",pid=11546,fd=17)) + LISTEN 0 128 *:10250 *:* users:(("kubelet",pid=10227,fd=37)) + LISTEN 0 128 *:10251 *:* users:(("kube-scheduler",pid=9691,fd=5)) + LISTEN 0 128 *:6443 *:* users:(("kube-apiserver",pid=9696,fd=5)) + LISTEN 0 128 *:10252 *:* users:(("kube-controller",pid=9700,fd=5)) + LISTEN 0 128 *:10256 *:* users:(("kube-proxy",pid=10743,fd=14)) + #+end_EXAMPLE + +** helm upgrade in place +#+NAME: helm upgrade in place +#+begin_SRC shell + helm upgrade \ + ii-traefik \ + --namespace traefik \ + --values traefik-1.7-config.yaml \ + stable/traefik +#+end_SRC + +#+RESULTS: helm upgrade in place +#+begin_EXAMPLE +Release "ii-traefik" has been upgraded. Happy Helming! +NAME: ii-traefik +LAST DEPLOYED: Tue Dec 31 00:57:38 2019 +NAMESPACE: traefik +STATUS: deployed +REVISION: 2 +TEST SUITE: None +NOTES: +1. Get Traefik's load balancer IP/hostname: + + NOTE: It may take a few minutes for this to become available. + + You can watch the status by running: + + $ kubectl get svc ii-traefik --namespace traefik -w + + Once 'EXTERNAL-IP' is no longer '': + + $ kubectl describe svc ii-traefik --namespace traefik | grep Ingress | awk '{print $3}' + +2. Configure DNS records corresponding to Kubernetes ingress resources to point to the load balancer IP/hostname found in step 1 +#+end_EXAMPLE + +** traefik logs + +#+BEGIN_SRC tmate :session foo:traefik_logs + TRAEFIK_POD=$( + kubectl get pod --selector=app=traefik --namespace=${TRAEFIK_NAMESPACE} -o name \ + | sed s:pod/::) + kubectl logs $TRAEFIK_POD --namespace=${TRAEFIK_NAMESPACE} -f | jq . 
+#+END_SRC + +** wait for ip to set dns for +*** wait (-w) for traefik service to get an IP via tmate + #+NAME: watch traefik get an IP + #+BEGIN_SRC tmate :session foo:watch + kubectl get svc --namespace=${TRAEFIK_NAMESPACE} ${TRAEFIK_DEPLOYMENT} -w + #+END_SRC + +*** traefik service + #+NAME: get traefik service + #+BEGIN_SRC shell + kubectl get svc --namespace=${TRAEFIK_NAMESPACE} ${TRAEFIK_DEPLOYMENT} + #+END_SRC + + #+RESULTS: get traefik service + #+begin_EXAMPLE + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + kubernetes ClusterIP 10.96.0.1 443/TCP 103m + #+end_EXAMPLE + +*** traefik inbound ip + + #+NAME: traefik inbound IP + #+BEGIN_SRC shell + kubectl describe svc --namespace=${TRAEFIK_NAMESPACE} ${TRAEFIK_DEPLOYMENT} | grep Ingress | awk '{print $3}' + #+END_SRC + + #+RESULTS: traefik inbound IP + #+begin_EXAMPLE + 35.189.56.228 + #+end_EXAMPLE + +** look at traefik +*** deployment +#+NAME: ii-traefik deployment +#+BEGIN_SRC shell :wrap "SRC yaml" +kubectl get deployment --namespace ${TRAEFIK_NAMESPACE} ${TRAEFIK_DEPLOYMENT} -o yaml +#+END_SRC + +#+RESULTS: ii-traefik deployment +#+begin_SRC yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "4" + creationTimestamp: "2019-08-30T05:07:16Z" + generation: 4 + labels: + app: traefik + chart: traefik-1.77.1 + heritage: Tiller + release: ii-traefik + name: ii-traefik + namespace: kube-system + resourceVersion: "647910" + selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/ii-traefik + uid: 08d82ebc-cae4-11e9-9d36-42010a9800d6 +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: traefik + release: ii-traefik + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 1ea5e59bdf9f15878cc4f13a3849d2f25ca9d4d48e8ad2fc9e7fb71e23584be5 + creationTimestamp: null + labels: + app: traefik + 
chart: traefik-1.77.1 + heritage: Tiller + release: ii-traefik + spec: + containers: + - args: + - --configfile=/config/traefik.toml + env: + - name: DNSIMPLE_BASE_URL + valueFrom: + secretKeyRef: + key: DNSIMPLE_BASE_URL + name: ii-traefik-dnsprovider-config + - name: DNSIMPLE_OAUTH_TOKEN + valueFrom: + secretKeyRef: + key: DNSIMPLE_OAUTH_TOKEN + name: ii-traefik-dnsprovider-config + image: traefik:1.7.14 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /ping + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + name: ii-traefik + ports: + - containerPort: 80 + name: http + protocol: TCP + - containerPort: 8880 + name: httpn + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + - containerPort: 8080 + name: dash + protocol: TCP + readinessProbe: + failureThreshold: 1 + httpGet: + path: /ping + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /config + name: config + - mountPath: /ssl + name: ssl + - mountPath: /acme + name: acme + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: ii-traefik + serviceAccountName: ii-traefik + terminationGracePeriodSeconds: 60 + volumes: + - configMap: + defaultMode: 420 + name: ii-traefik + name: config + - name: ssl + secret: + defaultMode: 420 + secretName: ii-traefik-default-cert + - name: acme + persistentVolumeClaim: + claimName: ii-traefik-acme +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2019-08-30T05:07:48Z" + lastUpdateTime: "2019-08-30T05:07:48Z" + message: Deployment has minimum availability. 
+ reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2019-08-30T05:07:16Z" + lastUpdateTime: "2019-08-30T05:21:11Z" + message: ReplicaSet "ii-traefik-fdcf76955" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 4 + readyReplicas: 1 + replicas: 1 + updatedReplicas: 1 +#+end_SRC + +*** services +**** traefik service list +#+NAME: ii-traefik service list +#+BEGIN_SRC shell +kubectl get services --namespace ${TRAEFIK_NAMESPACE} | grep traefik +#+END_SRC + +#+RESULTS: ii-traefik service list +#+begin_EXAMPLE +ii-traefik LoadBalancer 10.0.4.69 35.189.56.228 80:31199/TCP,443:31755/TCP 6d22h +ii-traefik-dashboard ClusterIP 10.0.1.227 80/TCP 6d22h +#+end_EXAMPLE + +**** traefik service +#+NAME: ii-traefik service +#+BEGIN_SRC shell :wrap "SRC yaml" +kubectl get services --namespace ${TRAEFIK_NAMESPACE} ${TRAEFIK_DEPLOYMENT} -o yaml +#+END_SRC + +#+RESULTS: ii-traefik service +#+begin_SRC yaml +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: "2019-08-30T05:07:16Z" + labels: + app: traefik + chart: traefik-1.77.1 + heritage: Tiller + release: ii-traefik + name: ii-traefik + namespace: kube-system + resourceVersion: "645195" + selfLink: /api/v1/namespaces/kube-system/services/ii-traefik + uid: 08d6858a-cae4-11e9-9d36-42010a9800d6 +spec: + clusterIP: 10.0.4.69 + externalTrafficPolicy: Cluster + ports: + - name: http + nodePort: 31199 + port: 80 + protocol: TCP + targetPort: http + - name: https + nodePort: 31755 + port: 443 + protocol: TCP + targetPort: https + selector: + app: traefik + release: ii-traefik + sessionAffinity: None + type: LoadBalancer +status: + loadBalancer: + ingress: + - ip: 35.189.56.228 +#+end_SRC + +**** traefik-dashboard service +#+NAME: ii-traefik-dashbord service +#+BEGIN_SRC shell :wrap "SRC yaml" +kubectl get services --namespace ${TRAEFIK_NAMESPACE} ${TRAEFIK_DEPLOYMENT}-dashboard -o yaml +#+END_SRC + +#+RESULTS: 
ii-traefik-dashboard service
[create] +componentstatuses cs false ComponentStatus [get list] +configmaps cm true ConfigMap [create delete deletecollection get list patch update watch] +endpoints ep true Endpoints [create delete deletecollection get list patch update watch] +events ev true Event [create delete deletecollection get list patch update watch] +limitranges limits true LimitRange [create delete deletecollection get list patch update watch] +namespaces ns false Namespace [create delete get list patch update watch] +nodes no false Node [create delete deletecollection get list patch update watch] +persistentvolumeclaims pvc true PersistentVolumeClaim [create delete deletecollection get list patch update watch] +persistentvolumes pv false PersistentVolume [create delete deletecollection get list patch update watch] +pods po true Pod [create delete deletecollection get list patch update watch] +podtemplates true PodTemplate [create delete deletecollection get list patch update watch] +replicationcontrollers rc true ReplicationController [create delete deletecollection get list patch update watch] +resourcequotas quota true ResourceQuota [create delete deletecollection get list patch update watch] +secrets true Secret [create delete deletecollection get list patch update watch] +serviceaccounts sa true ServiceAccount [create delete deletecollection get list patch update watch] +services svc true Service [create delete get list patch update watch] +mutatingwebhookconfigurations admissionregistration.k8s.io false MutatingWebhookConfiguration [create delete deletecollection get list patch update watch] +validatingwebhookconfigurations admissionregistration.k8s.io false ValidatingWebhookConfiguration [create delete deletecollection get list patch update watch] +customresourcedefinitions crd,crds apiextensions.k8s.io false CustomResourceDefinition [create delete deletecollection get list patch update watch] +apiservices apiregistration.k8s.io false APIService [create delete deletecollection get 
list patch update watch] +controllerrevisions apps true ControllerRevision [create delete deletecollection get list patch update watch] +daemonsets ds apps true DaemonSet [create delete deletecollection get list patch update watch] +deployments deploy apps true Deployment [create delete deletecollection get list patch update watch] +replicasets rs apps true ReplicaSet [create delete deletecollection get list patch update watch] +statefulsets sts apps true StatefulSet [create delete deletecollection get list patch update watch] +tokenreviews authentication.k8s.io false TokenReview [create] +localsubjectaccessreviews authorization.k8s.io true LocalSubjectAccessReview [create] +selfsubjectaccessreviews authorization.k8s.io false SelfSubjectAccessReview [create] +selfsubjectrulesreviews authorization.k8s.io false SelfSubjectRulesReview [create] +subjectaccessreviews authorization.k8s.io false SubjectAccessReview [create] +horizontalpodautoscalers hpa autoscaling true HorizontalPodAutoscaler [create delete deletecollection get list patch update watch] +cronjobs cj batch true CronJob [create delete deletecollection get list patch update watch] +jobs batch true Job [create delete deletecollection get list patch update watch] +cephblockpools ceph.rook.io true CephBlockPool [delete deletecollection get list patch create update watch] +cephclients ceph.rook.io true CephClient [delete deletecollection get list patch create update watch] +cephclusters ceph.rook.io true CephCluster [delete deletecollection get list patch create update watch] +cephfilesystems ceph.rook.io true CephFilesystem [delete deletecollection get list patch create update watch] +cephnfses nfs ceph.rook.io true CephNFS [delete deletecollection get list patch create update watch] +cephobjectstores ceph.rook.io true CephObjectStore [delete deletecollection get list patch create update watch] +cephobjectstoreusers rcou,objectuser ceph.rook.io true CephObjectStoreUser [delete deletecollection get list patch 
create update watch] +certificatesigningrequests csr certificates.k8s.io false CertificateSigningRequest [create delete deletecollection get list patch update watch] +leases coordination.k8s.io true Lease [create delete deletecollection get list patch update watch] +endpointslices discovery.k8s.io true EndpointSlice [create delete deletecollection get list patch update watch] +events ev events.k8s.io true Event [create delete deletecollection get list patch update watch] +ingresses ing extensions true Ingress [create delete deletecollection get list patch update watch] +ingresses ing networking.k8s.io true Ingress [create delete deletecollection get list patch update watch] +networkpolicies netpol networking.k8s.io true NetworkPolicy [create delete deletecollection get list patch update watch] +runtimeclasses node.k8s.io false RuntimeClass [create delete deletecollection get list patch update watch] +objectbucketclaims obc,obcs objectbucket.io true ObjectBucketClaim [delete deletecollection get list patch create update watch] +objectbuckets ob,obs objectbucket.io false ObjectBucket [delete deletecollection get list patch create update watch] +poddisruptionbudgets pdb policy true PodDisruptionBudget [create delete deletecollection get list patch update watch] +podsecuritypolicies psp policy false PodSecurityPolicy [create delete deletecollection get list patch update watch] +clusterrolebindings rbac.authorization.k8s.io false ClusterRoleBinding [create delete deletecollection get list patch update watch] +clusterroles rbac.authorization.k8s.io false ClusterRole [create delete deletecollection get list patch update watch] +rolebindings rbac.authorization.k8s.io true RoleBinding [create delete deletecollection get list patch update watch] +roles rbac.authorization.k8s.io true Role [create delete deletecollection get list patch update watch] +volumes rv rook.io true Volume [delete deletecollection get list patch create update watch] +priorityclasses pc 
scheduling.k8s.io false PriorityClass [create delete deletecollection get list patch update watch] +volumesnapshotclasses snapshot.storage.k8s.io false VolumeSnapshotClass [delete deletecollection get list patch create update watch] +volumesnapshotcontents snapshot.storage.k8s.io false VolumeSnapshotContent [delete deletecollection get list patch create update watch] +volumesnapshots snapshot.storage.k8s.io true VolumeSnapshot [delete deletecollection get list patch create update watch] +csidrivers storage.k8s.io false CSIDriver [create delete deletecollection get list patch update watch] +csinodes storage.k8s.io false CSINode [create delete deletecollection get list patch update watch] +storageclasses sc storage.k8s.io false StorageClass [create delete deletecollection get list patch update watch] +volumeattachments storage.k8s.io false VolumeAttachment [create delete deletecollection get list patch update watch] +#+end_EXAMPLE +** explores +#+BEGIN_SRC shell +kubectl get ingress --all-namespaces +#+END_SRC + +#+RESULTS: +#+begin_EXAMPLE +No resources found +#+end_EXAMPLE + +** traefik-2.1-config.yaml +#+NAME: traefik.yaml helm values +#+BEGIN_SRC yaml :tangle /ssh:ubuntu@192.168.1.101:traefik-config.yaml :noweb yes + # Default values for Traefik + image: + name: traefik + tag: 2.1.1 + + # + # Configure the deployment + # + deployment: + # Number of pods of the deployment + replicas: 1 + + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 + + # + # Configure Traefik entry points + # Additional arguments to be passed at Traefik's binary + ## Use curly braces to pass values: `helm install --set="{--providers.kubernetesingress,--global.checknewversion=true}" ." 
+ additionalArguments: + - "--providers.kubernetesingress" + - "--global.checknewversion=true" + + ports: + # The name of this one can't be changed as it is used for the readiness and + # liveness probes, but you can adjust its config to your liking + traefik: + port: 9000 + # Defines whether the port is exposed if service.type is LoadBalancer or + # NodePort. + # + # You SHOULD NOT expose the traefik port on production deployments. + # If you want to access it from outside of your cluster, + # use `kubectl proxy` or create a secure ingress + expose: false + # The exposed port for this service + exposedPort: 9000 + web: + port: 8000 + expose: true + exposedPort: 80 + websecure: + port: 8443 + expose: true + exposedPort: 443 + + # Options for the main traefik service, where the entrypoints traffic comes + # from. + service: + # type: LoadBalancer + # type: NodePort + # Additional annotations (e.g. for cloud provider specific config) + annotations: {} + # Additional entries here will be added to the service spec. Cannot contains + # type, selector or ports entries. 
+ spec: {} + # externalTrafficPolicy: Cluster + # loadBalancerIp: "1.2.3.4" + # clusterIP: "2.3.4.5" + + dashboard: + # Enable the dashboard on Traefik + enable: true + + # Expose the dashboard and api through an ingress route at /dashboard + # and /api This is not secure and SHOULD NOT be enabled on production + # deployments + ingressRoute: true + + logs: + loglevel: WARN + # + resources: {} + # requests: + # cpu: "100m" + # memory: "50Mi" + # limits: + # cpu: "300m" + # memory: "150Mi" + nodeSelector: {} + tolerations: [] +#+END_SRC + +* k + #+begin_src shell + kubeadm config print init-defaults --component-configs KubeletConfiguration,KubeProxyConfiguration + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + W1220 01:00:47.420920 22898 validation.go:28] Cannot validate kubelet config - no validator is available + W1220 01:00:47.421090 22898 validation.go:28] Cannot validate kube-proxy config - no validator is available + W1220 01:00:47.424708 22898 validation.go:28] Cannot validate kube-proxy config - no validator is available + W1220 01:00:47.424762 22898 validation.go:28] Cannot validate kubelet config - no validator is available + W1220 01:00:47.427539 22898 validation.go:28] Cannot validate kube-proxy config - no validator is available + W1220 01:00:47.427595 22898 validation.go:28] Cannot validate kubelet config - no validator is available + apiVersion: kubeadm.k8s.io/v1beta2 + bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + token: abcdef.0123456789abcdef + ttl: 24h0m0s + usages: + - signing + - authentication + kind: InitConfiguration + localAPIEndpoint: + advertiseAddress: 1.2.3.4 + bindPort: 6443 + nodeRegistration: + criSocket: /var/run/dockershim.sock + name: ubuntu + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/master + --- + apiServer: + timeoutForControlPlane: 4m0s + apiVersion: kubeadm.k8s.io/v1beta2 + certificatesDir: /etc/kubernetes/pki + clusterName: kubernetes + controllerManager: {} + dns: + 
type: CoreDNS + etcd: + local: + dataDir: /var/lib/etcd + imageRepository: k8s.gcr.io + kind: ClusterConfiguration + kubernetesVersion: v1.17.0 + networking: + dnsDomain: cluster.local + serviceSubnet: 10.96.0.0/12 + scheduler: {} + --- + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + fileCheckFrequency: 0s + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + --- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 0.0.0.0 + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: "" + configSyncPeriod: 0s + conntrack: + maxPerCore: null + min: null + tcpCloseWaitTimeout: null + tcpEstablishedTimeout: null + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + kind: KubeProxyConfiguration + metricsBindAddress: "" + mode: "" + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + udpIdleTimeout: 0s + winkernel: + enableDSR: false + networkName: "" + sourceVip: "" + #+end_EXAMPLE + #+begin_src shell + #cat /etc/kubernetes/bootstrap-kubelet.conf + ls -la /etc/kubernetes/ + 
#ls -la /etc/default/kubelet + #KUBELET_EXTRA_ARGS + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + total 44 + drwxr-xr-x 4 root root 4096 Dec 20 01:58 . + drwxr-xr-x 98 root root 4096 Dec 19 18:33 .. + -rw------- 1 root root 5453 Dec 20 01:58 admin.conf + -rw------- 1 root root 5485 Dec 20 01:58 controller-manager.conf + -rw------- 1 root root 1861 Dec 20 01:58 kubelet.conf + drwxr-xr-x 2 root root 4096 Dec 20 01:58 manifests + drwxr-xr-x 3 root root 4096 Dec 20 01:58 pki + -rw------- 1 root root 5433 Dec 20 01:58 scheduler.conf + #+end_EXAMPLE +* Allow all ports +** apiserver cli arguments + #+begin_src shell + ps -axwu | grep kube-apiserver | sed 's/ /\n/g' \ + | grep \\-\\- | sort + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + --advertise-address=192.168.1.101 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/etc/kubernetes/pki/ca.crt + --enable-admission-plugins=NodeRestriction + --enable-bootstrap-token-auth=true + --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt + --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt + --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --insecure-port=0 + --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt + --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt + --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=6443 + --service-account-key-file=/etc/kubernetes/pki/sa.pub + --service-cluster-ip-range=10.96.0.0/12 + --service-node-port-range=22-30000 + 
--tls-cert-file=/etc/kubernetes/pki/apiserver.crt + --tls-private-key-file=/etc/kubernetes/pki/apiserver.key + #+end_EXAMPLE +** kubeproxy cli arguments + #+begin_src shell + ps -axwu | grep kube-proxy | sed 's/ /\n/g' \ + | grep \\-\\- | sort + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + --config=/var/lib/kube-proxy/config.conf + --hostname-override=ubuntu + #+end_EXAMPLE + + #+begin_src shell + ls -la /var/lib/ | grep ku + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + drwxr-xr-x 8 root root 4096 Dec 20 01:58 kubelet + #+end_EXAMPLE +[[/ssh:ubuntu@192.168.1.101|sudo:root@192.168.1.101:/etc/kubernetes/]] +* Requirements +** ip address +** port / hostport +** pvc / default storage class +*** nfs would work later +*** sig-storage-static-provisioner +We might want to have a local disk attached. +So we put on a 1TB ssd. +#+begin_src shell +lsblk | grep sda +#+end_src + +#+RESULTS: +#+begin_EXAMPLE +sda 8:0 0 894.3G 0 disk +#+end_EXAMPLE +** rook +Do we need to upgrade helm's approach? +Is rbac enabled? 
+ +#+begin_src shell +kubectl cluster-info dump | grep authorization-mode +#+end_src + +#+RESULTS: +#+begin_EXAMPLE + "--authorization-mode=Node,RBAC", +#+end_EXAMPLE + +#+begin_src shell +# Create a ServiceAccount for Tiller in the `kube-system` namespace +kubectl --namespace kube-system create sa tiller + +# Create a ClusterRoleBinding for Tiller +kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller + +# Patch Tiller's Deployment to use the new ServiceAccount +kubectl --namespace kube-system patch deploy/tiller-deploy -p '{"spec": {"template": {"spec": {"serviceAccountName": "tiller"}}}}' +#+end_src + +#+RESULTS: +#+begin_EXAMPLE +serviceaccount/tiller created +clusterrolebinding.rbac.authorization.k8s.io/tiller created +Error from server (NotFound): deployments.apps "tiller-deploy" not found +#+end_EXAMPLE +** update helm repo to include default k8s stable + #+begin_src shell + helm repo add rook-release https://charts.rook.io/release + helm search repo rook + # helm search rook-ceph + # helm install --namespace rook-ceph rook-release/rook-ceph + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + "rook-release" has been added to your repositories + NAME CHART VERSION APP VERSION DESCRIPTION + rook-release/rook-ceph v1.2.0 File, Block, and Object Storage Services for yo... 
+ stable/rookout 0.1.0 1.0 A Helm chart for Rookout agent on Kubernetes + #+end_EXAMPLE +** Ensure lvm works + #+begin_src shell + sudo pvs -a + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + PV VG Fmt Attr PSize PFree + /dev/loop0 --- 0 0 + /dev/loop1 --- 0 0 + /dev/loop2 --- 0 0 + /dev/mmcblk0p1 --- 0 0 + /dev/mmcblk0p2 --- 0 0 + /dev/sda --- 0 0 + #+end_EXAMPLE +* Install Rook + #+begin_src shell + ls -la /dev/disk/by-path/platform-*pci*usb* + # ls -la /dev/disk/by-path/platform-.*pci.* + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + lrwxrwxrwx 1 root root 9 Dec 28 02:54 /dev/disk/by-path/platform-fd500000.pcie-pci-0000:01:00.0-usb-0:2:1.0-scsi-0:0:0:0 -> ../../sda + #+end_EXAMPLE +** for later + #+begin_src shell + ROOT_OP_POD=$(kubectl --namespace rook-ceph describe d + kubectl logs --namespace rook-ceph $ROOT_OP_POD + # ROOT_OP_POD=$(kubectl --namespace rook-ceph get pods -l "app=rook-ceph-operator" -o name) + # kubectl logs --namespace rook-ceph $ROOT_OP_POD + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + #+end_EXAMPLE + #+begin_src shell + docker images | grep ceph\\\|csi + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + csi-node-driver-registrar latest bf8c90f910d9 18 hours ago 16.6MB + quay.io/k8scsi/csi-node-driver-registrar v1.1.0 bf8c90f910d9 18 hours ago 16.6MB + csi-attacher latest 89499377228b 18 hours ago 44.7MB + quay.io/k8scsi/csi-attacher v1.2.0 89499377228b 18 hours ago 44.7MB + csi-snapshotter latest cd74005517c1 18 hours ago 46MB + quay.io/k8scsi/csi-snapshotter v1.2.2 cd74005517c1 18 hours ago 46MB + quay.io/k8scsi/csi-provisioner v1.4.0 2dc30504f03e 19 hours ago 52.6MB + csi-provisioner latest 2dc30504f03e 19 hours ago 52.6MB + quay.io/cephcsi/cephcsi v1.2.2 e73792b88385 19 hours ago 940MB + rook/ceph master 0de3709a4ba8 2 days ago 929MB + rook/ceph v1.2.0 2e69cb44dd57 10 days ago 929MB + ceph/ceph v14.2 7fb4cbf85c65 2 weeks ago 855MB + ceph/ceph v14.2.5 7fb4cbf85c65 2 weeks ago 855MB + quay.io/cephcsi/cephcsi d46311d35105 5 weeks ago 984MB + 
quay.io/k8scsi/csi-snapshotter 538dbe77c2f9 2 months ago 47.6MB + quay.io/k8scsi/csi-provisioner 2130c4e026a5 2 months ago 54.5MB + quay.io/k8scsi/csi-attacher eef7a9550ede 6 months ago 46.2MB + quay.io/k8scsi/csi-node-driver-registrar a93898755322 8 months ago 15.8MB + #+end_EXAMPLE +Need to enable docker experimental CLI options. +#+begin_src shell +mkdir -p ~/.docker +echo '{"experimental":"enabled"}' > ~/.docker/config.json +rm ~/.docker/config.json +rm -rf ~/.docker +#+end_src + +#+RESULTS: +#+begin_EXAMPLE +#+end_EXAMPLE + #+begin_src shell :var DOCKER_CLI_EXPERIMENTAL="enabled" + echo $DOCKER_CLI_EXPERIMENTAL + docker manifest inspect quay.io/cephcsi/cephcsi:v1.2.2 + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + enabled + docker manifest inspect is only supported on a Docker cli with experimental cli features enabled + #+end_EXAMPLE + + #+begin_src shell + docker manifest inspect rook/ceph:master + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 1787, + "digest": "sha256:f8268ed131d0ad151d749bcfa9692b7341c410625568445b8107a67019c2172a", + "platform": { + "architecture": "amd64", + "os": "linux" + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 1788, + "digest": "sha256:e586993b4db487dd022eb85ea5b1f81afdcf9324bd272e9ce1648b6846bf11e7", + "platform": { + "architecture": "arm64", + "os": "linux" + } + } + ] + } + #+end_EXAMPLE + + #+begin_src shell + docker manifest inspect quay.io/k8scsi/csi-snapshotter + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + no such manifest: quay.io/k8scsi/csi-snapshotter:latest + #+end_EXAMPLE +* rebuilding csi + #+begin_src shell + mkdir -p ~/go/src/github.com/ceph + cd ~/go/src/github.com/ceph + git clone --recursive --branch v1.2.2 --depth 1 https://github.com/ceph/ceph-csi + #+end_src + 
#+begin_src shell + cd ~/go/src/github.com/ceph/ceph-csi + make image-cephcsi + #+end_src + + #+RESULTS: + #+begin_EXAMPLE + cephcsi image settings: quay.io/cephcsi/cephcsi version v1.2.2 + if [ ! -d ./vendor ]; then dep ensure -vendor-only; fi + CGO_ENABLED=0 GOOS=linux go build -a -ldflags ' -X github.com/ceph/ceph-csi/pkg/util.GitCommit=f8c854dc7d6ffff02cb2eed6002534dc0473f111 -X github.com/ceph/ceph-csi/pkg/util.DriverVersion=v1.2.2 -extldflags "-static"' -o _output/cephcsi ./cmd/ + cp _output/cephcsi deploy/cephcsi/image/cephcsi + docker build -t quay.io/cephcsi/cephcsi:v1.2.2 deploy/cephcsi/image + Sending build context to Docker daemon 557.1kB Sending build context to Docker daemon 3.342MB Sending build context to Docker daemon 6.128MB Sending build context to Docker daemon 8.913MB Sending build context to Docker daemon 11.14MB Sending build context to Docker daemon 13.93MB Sending build context to Docker daemon 16.71MB Sending build context to Docker daemon 19.5MB Sending build context to Docker daemon 22.28MB Sending build context to Docker daemon 25.07MB Sending build context to Docker daemon 27.85MB Sending build context to Docker daemon 30.64MB Sending build context to Docker daemon 32.31MB Sending build context to Docker daemon 34.54MB Sending build context to Docker daemon 37.32MB Sending build context to Docker daemon 40.11MB Sending build context to Docker daemon 42.73MB + Step 1/7 : FROM ceph/ceph:v14.2 + v14.2: Pulling from ceph/ceph + Digest: sha256:8c86fc6acf47edb6c3e38777b72c3fea2bad5be18c7e88553673205b378d0121 + Status: Downloaded newer image for ceph/ceph:v14.2 + ---> 7fb4cbf85c65 + Step 2/7 : LABEL maintainers="Ceph-CSI Authors" + ---> Running in 1469b57d9381 + Removing intermediate container 1469b57d9381 + ---> de4f1e0ee45e + Step 3/7 : LABEL description="Ceph-CSI Plugin" + ---> Running in e6a81785954e + Removing intermediate container e6a81785954e + ---> 38c1b8574903 + Step 4/7 : ENV CSIBIN=/usr/local/bin/cephcsi + ---> Running in 
d13c37ddc1a4 + Removing intermediate container d13c37ddc1a4 + ---> f2991dd06573 + Step 5/7 : COPY cephcsi $CSIBIN + ---> 459e4a563a1d + Step 6/7 : RUN chmod +x $CSIBIN + ---> Running in 1ccfd3b5884d + Removing intermediate container 1ccfd3b5884d + ---> 24d99a35aef1 + Step 7/7 : ENTRYPOINT ["/usr/local/bin/cephcsi"] + ---> Running in 1cb36208fc39 + Removing intermediate container 1cb36208fc39 + ---> e73792b88385 + Successfully built e73792b88385 + Successfully tagged quay.io/cephcsi/cephcsi:v1.2.2 + #+end_EXAMPLE + + #+begin_src shell + sudo apt-get install -y make golang-go + #+end_src + + + #+begin_src shell + mkdir -p ~/go/src/github.com/kubernetes-csi + cd ~/go/src/github.com/kubernetes-csi + git clone --recursive --branch v1.4.0 --depth 1 https://github.com/kubernetes-csi/external-provisioner + #+end_src + + #+begin_src shell + cd ~/go/src/github.com/kubernetes-csi/external-provisioner + make container + docker tag csi-provisioner:latest quay.io/k8scsi/csi-provisioner:v1.4.0 + #+end_src + #+begin_src shell + cd ~/go/src/github.com/kubernetes-csi + git clone --recursive --branch v1.2.2 --depth 1 https://github.com/kubernetes-csi/external-snapshotter + #+end_src + #+begin_src shell + cd ~/go/src/github.com/kubernetes-csi/external-snapshotter + make container + docker tag csi-snapshotter:latest quay.io/k8scsi/csi-snapshotter:v1.2.2 + #+end_src + #+begin_src shell + cd ~/go/src/github.com/kubernetes-csi + git clone --recursive --branch v1.2.0 --depth 1 https://github.com/kubernetes-csi/external-attacher + #+end_src + #+begin_src shell + cd ~/go/src/github.com/kubernetes-csi/external-attacher + make container + docker tag csi-attacher:latest quay.io/k8scsi/csi-attacher:v1.2.0 + #+end_src + + #+begin_src shell + cd ~/go/src/github.com/kubernetes-csi + git clone --recursive --branch v1.1.0 --depth 1 https://github.com/kubernetes-csi/node-driver-registrar + #+end_src + #+begin_src shell + cd ~/go/src/github.com/kubernetes-csi/node-driver-registrar + make container + 
docker tag csi-node-driver-registrar:latest quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + #+end_src +* rebuilding k14s / kwt + #+begin_src shell + mkdir -p ~/go/src/github.com/k14s + cd ~/go/src/github.com/k14s + git clone --recursive --branch v0.0.6 --depth 1 https://github.com/k14s/kwt + #+end_src + + #+begin_src shell + cd ~/go/src/github.com/k14s/kwt/images/sshd + docker build -t cppforfile/sshd . + #+end_src + + #+begin_src shell + sudo apt-get -y install iptables + #+end_src +* building tilt +Tilt is required for building tmate + #+begin_src shell + mkdir -p ~/go/src/github.com/windmilleng + cd ~/go/src/github.com/windmilleng + git clone --recursive --branch v0.10.25 --depth 1 https://github.com/windmilleng/tilt + #+end_src + #+begin_src shell + cd ~/go/src/github.com/windmilleng/tilt + make + #+end_src + +* rebuilding tmate + #+begin_src shell + + cd ~/go/src/github.com/k14s + git clone --recursive --branch v0.0.6 --depth 1 https://github.com/k14s/kwt + #+end_src +* web + #+begin_src emacs-lisp +(symbol-value 'file-local-variables-alist) +(alist-get 'ii file-local-variables-alist) + + #+end_src +# Local Variables: +# ii: set +# End: diff --git a/research/postgres_operator_on_pair.org b/research/postgres_operator_on_pair.org new file mode 100644 index 0000000..ff64864 --- /dev/null +++ b/research/postgres_operator_on_pair.org @@ -0,0 +1,181 @@ +#+TITLE: Postgres Operator On Pair +Stand up postgres operator on multi node cluster +Work based on https://github.com/zalando/postgres-operator +* yaml needed: +#+begin_src yaml :tangle /tmp/pg_operator.yaml :results silent +apiVersion: helm.fluxcd.io/v1 +kind: HelmRelease +metadata: + name: postgres-operator + namespace: postgres-operator +spec: + releaseName: postgres-operator + chart: + git: https://github.com/zalando/postgres-operator.git + ref: v1.6.1 + path: charts/postgres-operator + values: + configKubernetes: + enable_pod_antiaffinity: "true" +#+end_src + +#+begin_src yaml :tangle /tmp/pg_database.yaml 
:results silent +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: ii-ii + namespace: ii-db +spec: + enableConnectionPooler: true + teamId: "ii" + volume: + size: 20Gi + numberOfInstances: 3 + users: + ii: # database owner + - superuser + - createdb + databases: + ii: ii # dbname: owner + postgresql: + version: "12" +#+end_src + + + +* Create the postgres-operator ns +#+begin_src shell +kubectl get ns -A +#+end_src + +#+begin_src shell +kubectl create ns postgres-operator +#+end_src + +* Apply the chart +#+begin_src shell +kubectl create namespace ii-db +#+end_src + +#+RESULTS: +#+begin_example +namespace/ii-db created +#+end_example + +#+begin_src shell +kubectl apply -f /tmp/pg_operator.yaml +#+end_src + +#+RESULTS: +#+begin_example +helmrelease.helm.fluxcd.io/postgres-operator created +#+end_example + +#+begin_src shell +kubectl apply -f /tmp/pg_database.yaml +#+end_src + +#+RESULTS: +#+begin_example +postgresql.acid.zalan.do/ii-ii created +#+end_example + +A + +* Create shell function to connect to psql in pg-operator +THis can be used and tangled out, I ended up just using the lines to get the ip and password +#+begin_src yaml :tangle /tmp/psql-shell.sh :results silent +#!/bin/bash + +if [ "$KUBECTL_PSQL_DEBUG" = "true" ]; then + set -x +fi + +function requireNonEmpty { + if [ -z "$2" ]; then + echo "error: '$1' is a required field" > /dev/stderr + exit 1 + fi +} + +if [ $# -eq 0 ]; then + echo "kubectl-psql + +usage: kubectl psql -n NAMESPACE POSTGRES_OPERATOR_DB" + exit 0 +fi + +NAMESPACE="default" +args=() +while [ $# -gt 0 ]; do + key="$1" + + case $key in + -n | --namespace) + NAMESPACE="$2" + shift + shift + ;; + ,*) + args+=($1) + shift + ;; + esac +done + +NAME=${args[0]} +requireNonEmpty "name" "$NAME" +DATABASE=${args[1]} + +POSTGRESQL_DATABASES=$(kubectl -n $NAMESPACE get postgresql $NAME -o=jsonpath='{.spec.databases}') +DATABASE=${DATABASE:-$(echo $POSTGRESQL_DATABASES | jq -r 'keys[0]')} +USERNAME=$(echo $POSTGRESQL_DATABASES | 
jq -r ".${DATABASE}") +SECRET_NAME=$USERNAME.$NAME.credentials.postgresql.acid.zalan.do +SECRET=$(kubectl -n $NAMESPACE get secret $SECRET_NAME -o=jsonpath='{.data.password}' | base64 --decode) +kubectl -n $NAMESPACE exec -it deployment/$NAME-pooler -- psql postgresql://$USERNAME:$SECRET@$NAME-pooler/$DATABASE "$@" +#+end_src + +Lets go get the password +#+begin_src shell +kubectl -n ii-db get secret ii.ii-ii.credentials.postgresql.acid.zalan.do -o=jsonpath='{.data.password}' | base64 --decode +#+end_src + +#+RESULTS: +#+begin_example +DxSO4S1aUQG3dHoG8AXogt0rbm2PGc6HsVAVtSKnbsJF5bwi0CTKamGBULq6rhnu +#+end_example + +Lets go get our host ip ( it is the one that ends with ii-pooler ) +#+begin_src shell +kubectl get svc -n ii-db +#+end_src + +#+RESULTS: +#+begin_example +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +ii-ii ClusterIP 10.97.202.14 5432/TCP 33m +ii-ii-config ClusterIP None 33m +ii-ii-pooler ClusterIP 10.110.111.126 5432/TCP 33m +ii-ii-repl ClusterIP 10.99.134.125 5432/TCP 33m +#+end_example + +* Connect to psql +For this connection I used: +user: ii +db: ii +pw: [one displayed above] +how: 10.110.111.126 +#+BEGIN_SRC sql-mode +SELECT schemaname, tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + schemaname | tablename +------------+----------- +(0 rows) + +#+end_SRC + + +Yas, I am connected and postgres is running on multiple nodes, how amazing is this? 
diff --git a/research/pyasn-lookup.org b/research/pyasn-lookup.org new file mode 100644 index 0000000..fca3740 --- /dev/null +++ b/research/pyasn-lookup.org @@ -0,0 +1,517 @@ +#+TITLE: Pyasn Lookup +Link to this document https://github.com/ii/org/blob/main/research/pyasn-lookup.org#lets-add-asn-results-to-the-dictionary +This is an investigation of https://github.com/hadiasghari/pyasn +Desired outcomes will be to parse the output from this library to tie each asn to its associated start and end ip ints so we can range over them to find customer asns +This document concerns itself with: +- Generating an asn to ip-range relationship +- Importing resulting csv into PostgreSQL and split subnets into start_ip and end_ip +- Upload the results of the PG transformation to bq +- Make final table with asn, cidr, start_ip, end_ip, start_ip_int, end_ip_int +* Clone repo +#+BEGIN_SRC tmate :window pyasn +git clone https://github.com/hadiasghari/pyasn.git +#+END_SRC +* Install pyasn +#+BEGIN_SRC tmate :window pyasn +pip install pyasn +#+END_SRC +Test python is up and working +#+BEGIN_SRC python tmate :window python +#print('Please wait') +return 'A line of text.\n'.rstrip() +#+END_SRC + +#+RESULTS: +#+begin_src python +A line of text. +#+end_src +* Download and process the latest rib file +You can view the full list on ftp://archive.routeviews.org//bgpdata/2021.05/RIBS/ +obviously adjust for date +To get the latest file simply run --latest +pip installed the scripts in .local so I am adding it to my path +These scripts are in the repo in pyasn-utils +TODO: Decide how we want to reference these scripts +#+BEGIN_SRC shell :dir (concat (getenv "HOME") "/foo") +export PATH="/home/ii/.local/bin/:$PATH" +pyasn_util_download.py --latest +#+END_SRC + +#+RESULTS: +#+begin_example +Connecting to ftp://archive.routeviews.org +Finding most recent archive in /bgpdata/2021.06/RIBS ... +Finding most recent archive in /bgpdata/2021.05/RIBS ... 
+Downloading ftp://archive.routeviews.org//bgpdata/2021.05/RIBS/rib.20210531.2200.bz2 +Download complete. +#+end_example +* Process rib file so we can transform it into a local data file +#+BEGIN_SRC shell :dir (concat (getenv "HOME") "/foo") +ls -al | grep rib +#+END_SRC + +#+RESULTS: +#+begin_example +-rw-r--r-- 1 ii ii 124276057 Jun 1 11:35 rib.20210531.2200.bz2 +#+end_example + +#+BEGIN_SRC shell :dir (concat (getenv "HOME") "/foo") +export PATH="/home/ii/.local/bin/:$PATH" +pyasn_util_convert.py --single rib.20210531.2200.bz2 ipasn_20140531_1.dat +#+END_SRC + +#+RESULTS: +#+begin_example +IPASN database saved (923124 IPV4 + 0 IPV6 prefixes) +#+end_example + +#+BEGIN_SRC shell :dir (concat (getenv "HOME") "/foo") +ls -alrt | tail -3 +#+END_SRC + +#+RESULTS: +#+begin_example +-rw-r--r-- 1 ii ii 124276057 Jun 1 11:35 rib.20210531.2200.bz2 +-rw-r--r-- 1 ii ii 19939342 Jun 1 11:42 ipasn_20140531_1.dat +#+end_example +* Use pyasn to parse the data file +For this initial poc, I want to look up an ASN(1128) and get subnets back. +#+BEGIN_SRC python :dir (concat (getenv "HOME") "/foo") +import pyasn +asndb = pyasn.pyasn('ipasn_20140531_1.dat') +return asndb.get_as_prefixes(1128) +#+END_SRC + +#+RESULTS: +#+begin_src python +{'131.180.0.0/16', '130.161.0.0/16', '145.94.0.0/16'} +#+end_src +* Plan flow that will get network ranges for all asns +GOAL: +I want a script that takes in asns and .dat file and returns a list of asns with associated network ranges + +I decided to restrict this script to just pyasn in python, +I will deal with data file generation and full ASN list generation separately +High level plan: +- Start with file that has all ASNs, we can get them from: + - https://bgp.potaroo.net/cidr/autnums.html has been a good resource + - Look what pyasn can produce + - Other? There are a lot of them out there, I struggled to find a complete one, happy to hear suggestions. 
+- Import pyasn +- Read in asnNumFile, set variable to the resulting array +- Set db string for .dat file we are using for the lookup. +- Range through array, for each element run get_as_prefixes() - ASN to subnet lookup +- Store result in a dictionary, + - the object has a key/value of asn/ip range. + - there are multiple ip ranges per asn +- When all the data has been appended to the dictionary write it to file as a csv +** Very nice succinct summary from zz +- i can open the asn list file and read it +- i can assign it to a variable. +- i can reference the variable and return the whole array +- i can reference an index of this variable and return one item of the array +- i can loop through an array and call a function based on the variable +- i can loop through an array and add each item and the results from the function to a dict +- i can write this dict to a CSV file +** Dictionary struct + The dictionary will look something like this: + #+BEGIN_EXAMPLE +{ + '6554': [ + - 123412, + - 123133, + - 123231 + ] +} + #+END_EXAMPLE +* Start with reading file into variable +Printing the resulting dictionary +#+BEGIN_SRC python :dir (concat (getenv "HOME") "/foo") :results output +print ('start') +File_object = open(r"/home/ii/foo/asnNumbersOnlyTail10.txt","r+") +asnNum = File_object.readlines() +print (asnNum) +dictOfWords = {i : 5 for i in asnNum} +print(dictOfWords) +print ('end') +#+END_SRC + +#+RESULTS: +#+begin_src python +start +['399549\n', '399553\n', '399561\n', '399564\n', '399593\n', '399588\n', '399587\n', '399724\n'] +{'399549\n': 5, '399553\n': 5, '399561\n': 5, '399564\n': 5, '399593\n': 5, '399588\n': 5, '399587\n': 5, '399724\n': 5} +end +#+end_src +* Lets add asn results to the dictionary +Printing the resulting dictionary +#+BEGIN_SRC python :dir (concat (getenv "HOME") "/foo") :results output +## Import pyasn +import pyasn +print('start') +## Open asnNumFile and read +asnFile = "/home/ii/foo/asnNumbersOnlyTail10.txt" +asnNum = [line.rstrip() for 
line in open(asnFile, "r+")] +print(asnNum) +## assign our dat file connection string +asndb = pyasn.pyasn('ipasn_20140531_1.dat') +## Declare empty dictionary +destDict = {} +## Loop through list of asns +for singleAsn in asnNum: + ## Set asn to int + print("val of x: ", singleAsn) + ## Go look up the asn subnets (prefixes) + subnets = asndb.get_as_prefixes(singleAsn) + print("This is x and it's prefix: ", singleAsn, asndb.get_as_prefixes(singleAsn)) + ## Add subnets with + originAsnDict = {sbnets : singleAsn for sbnets in subnets} + destDict.update(originAsnDict) + print("Value of destDict", destDict) +print("dict of words: ", originAsnDict) +print("Print final dictionary: ", destDict) +print("last subnets returned: ", subnets) +print('end') +#+END_SRC + +Look ma, good results! +Note the output from the final dictionary, it has output of each run through the loop appended to it. +#+RESULTS: +#+begin_src python +start +['399549', '399553', '399561', '399564', '399593', '399588', '399587'] +val of x: 399549 +This is x and it's prefix: 399549 {'216.87.86.0/24', '216.87.87.0/24'} +Value of destDict {'216.87.86.0/24': '399549', '216.87.87.0/24': '399549'} +val of x: 399553 +This is x and it's prefix: 399553 {'108.165.228.0/22'} +Value of destDict {'216.87.86.0/24': '399549', '216.87.87.0/24': '399549', '108.165.228.0/22': '399553'} +val of x: 399561 +This is x and it's prefix: 399561 {'163.123.163.0/24'} +Value of destDict {'216.87.86.0/24': '399549', '216.87.87.0/24': '399549', '108.165.228.0/22': '399553', '163.123.163.0/24': '399561'} +val of x: 399564 +This is x and it's prefix: 399564 {'205.178.171.0/24'} +Value of destDict {'216.87.86.0/24': '399549', '216.87.87.0/24': '399549', '108.165.228.0/22': '399553', '163.123.163.0/24': '399561', '205.178.171.0/24': '399564'} +val of x: 399593 +This is x and it's prefix: 399593 {'205.236.101.0/24'} +Value of destDict {'216.87.86.0/24': '399549', '216.87.87.0/24': '399549', '108.165.228.0/22': '399553', 
'163.123.163.0/24': '399561', '205.178.171.0/24': '399564', '205.236.101.0/24': '399593'} +val of x: 399588 +This is x and it's prefix: 399588 {'172.110.143.0/24'} +Value of destDict {'216.87.86.0/24': '399549', '216.87.87.0/24': '399549', '108.165.228.0/22': '399553', '163.123.163.0/24': '399561', '205.178.171.0/24': '399564', '205.236.101.0/24': '399593', '172.110.143.0/24': '399588'} +val of x: 399587 +This is x and it's prefix: 399587 {'193.3.54.0/24', '193.8.186.0/24', '193.8.187.0/24', '193.8.184.0/24', '193.8.185.0/24'} +Value of destDict {'216.87.86.0/24': '399549', '216.87.87.0/24': '399549', '108.165.228.0/22': '399553', '163.123.163.0/24': '399561', '205.178.171.0/24': '399564', '205.236.101.0/24': '399593', '172.110.143.0/24': '399588', '193.3.54.0/24': '399587', '193.8.186.0/24': '399587', '193.8.187.0/24': '399587', '193.8.184.0/24': '399587', '193.8.185.0/24': '399587'} +dict of words: {'193.3.54.0/24': '399587', '193.8.186.0/24': '399587', '193.8.187.0/24': '399587', '193.8.184.0/24': '399587', '193.8.185.0/24': '399587'} +Print final dictionary: {'216.87.86.0/24': '399549', '216.87.87.0/24': '399549', '108.165.228.0/22': '399553', '163.123.163.0/24': '399561', '205.178.171.0/24': '399564', '205.236.101.0/24': '399593', '172.110.143.0/24': '399588', '193.3.54.0/24': '399587', '193.8.186.0/24': '399587', '193.8.187.0/24': '399587', '193.8.184.0/24': '399587', '193.8.185.0/24': '399587'} +last subnets returned: {'193.3.54.0/24', '193.8.186.0/24', '193.8.187.0/24', '193.8.184.0/24', '193.8.185.0/24'} +end +#+end_src +* Lets go output the results to a csv +** sample code +Just a simple POC to see how outputting csv data works +#+BEGIN_SRC python :dir (concat (getenv "HOME") "/foo") :results output +import csv +a_file = open("sample.csv", "w") +a_dict = {"a": 1, "b": 2} + +writer = csv.writer(a_file) +for key, value in a_dict.items(): + writer.writerow([key, value]) + +a_file.close() +#+END_SRC + +#+RESULTS: +#+begin_src python +a,1 +b,2 +#+end_src +Easy 
enough. +** Add csv output to script +#+BEGIN_SRC python :dir (concat (getenv "HOME") "/foo") :results output +## Import pyasn and csv +import pyasn +import csv +# Lets go! +print('start') +## Open asnNumFile and read +asnFile = "/home/ii/foo/asnNumbersOnlyTail10.txt" +asnNum = [line.rstrip() for line in open(asnFile, "r+")] +# print(asnNum) +## assign our dat file connection string +asndb = pyasn.pyasn('ipasn_20140531_1.dat') +## Declare empty dictionary +destDict = {} +## Loop through list of asns +for singleAsn in asnNum: + ## Set asn to int + ## print("val of x: ", singleAsn) + ## Go look up the asn subnets (prefixes) + subnets = asndb.get_as_prefixes(singleAsn) + ## print("This is x and it's prefix: ", singleAsn, asndb.get_as_prefixes(singleAsn)) + ## Add subnets with + originAsnDict = {sbnets : singleAsn for sbnets in subnets} + destDict.update(originAsnDict) + ## print("Value of destDict", destDict) +# print("dict of asn/net-ranges from the las loop: ", originAsnDict) +print("Print final dictionary: ", destDict) +# print("last subnets returned: ", subnets) +# Open file for writing +resultsCsv = open("pyAsnOutput.csv", "w") +# write to csv +writer = csv.writer(resultsCsv) +for key, value in destDict.items(): + writer.writerow([key, value]) +## winner winner chicken dinner +print('end') +#+END_SRC + +#+RESULTS: +#+begin_src python +start +Print final dictionary: {'216.87.87.0/24': '399549', '216.87.86.0/24': '399549', '108.165.228.0/22': '399553', '163.123.163.0/24': '399561', '205.178.171.0/24': '399564', '205.236.101.0/24': '399593', '172.110.143.0/24': '399588', '193.8.186.0/24': '399587', '193.8.185.0/24': '399587', '193.8.184.0/24': '399587', '193.3.54.0/24': '399587', '193.8.187.0/24': '399587'} +end +#+end_src +Yip I conrimed the csv got generated and has the right content. +* Add fault tolerance +#+BEGIN_SRC python :dir (concat (getenv "HOME") "/foo") :results output +## Import pyasn and csv +import pyasn +import csv +# Lets go! 
+print('start') +## Set file path +asnFile = "/home/ii/foo/asnNumbersOnlyTail10.txt" +## Open asnNumFile and read +asnNum = [line.rstrip() for line in open(asnFile, "r+")] +## assign our dat file connection string +asndb = pyasn.pyasn('ipasn_20140531_1.dat') +## Declare empty dictionary +destDict = {} +singleAsn = "" +## Loop through list of asns +for singleAsn in asnNum: + ## Go look up the asn subnets (prefixes) + subnets = asndb.get_as_prefixes(singleAsn) + ## Add checking to make sure we have subnets + ## TODO: insert asn with no routes place holder so we know which failed without having to do a lookup + if not subnets: + print("This ASN has no subnets", singleAsn) + else: + ## Add subnets to our dictionaries with + originAsnDict = {sbnets : singleAsn for sbnets in subnets} + ## This is what lets us append each loop to the final destDict + destDict.update(originAsnDict) +resultsCsv = open("pyAsnOutput.csv", "w") +# write to csv +writer = csv.writer(resultsCsv) +for key, value in destDict.items(): + writer.writerow([key, value]) +## winner winner chicken dinner +print('end') +#+END_SRC + +#+RESULTS: +#+begin_src python +start +This is x and it's prefix: 399549 {'216.87.87.0/24', '216.87.86.0/24'} +This is x and it's prefix: 399553 {'108.165.228.0/22'} +This is x and it's prefix: 399561 {'163.123.163.0/24'} +This is x and it's prefix: 399564 {'205.178.171.0/24'} +This is x and it's prefix: 399593 {'205.236.101.0/24'} +This is x and it's prefix: 399588 {'172.110.143.0/24'} +This is x and it's prefix: 399587 {'193.8.186.0/24', '193.8.187.0/24', '193.8.185.0/24', '193.3.54.0/24', '193.8.184.0/24'} +This is x and it's prefix: 399724 None +This ASN has no subnets 399724 +Print final dictionary: {'216.87.87.0/24': '399549', '216.87.86.0/24': '399549', '108.165.228.0/22': '399553', '163.123.163.0/24': '399561', '205.178.171.0/24': '399564', '205.236.101.0/24': '399593', '172.110.143.0/24': '399588', '193.8.186.0/24': '399587', '193.8.187.0/24': '399587', 
'193.8.185.0/24': '399587', '193.3.54.0/24': '399587', '193.8.184.0/24': '399587'} +end +#+end_src +* Final script + +#+BEGIN_SRC python :dir (concat (getenv "HOME") "/foo") +## Import pyasn and csv +import pyasn +import csv + +## Set file path +asnFile = "/home/ii/foo/asnNumbersOnly.txt" +## Open asnNumFile and read +asnNum = [line.rstrip() for line in open(asnFile, "r+")] + +## assign our dat file connection string +asndb = pyasn.pyasn('ipasn_20140531_1.dat') +## Declare empty dictionary +destDict = {} +singleAsn = "" + +## Loop through list of asns +for singleAsn in asnNum: + ## Go look up the asn subnets (prefixes) + subnets = asndb.get_as_prefixes(singleAsn) + ## Add checking to make sure we have subnets + ## TODO: insert asn with no routes so we know which failed without having to do a lookup + if not subnets: + print("This ASN has no subnets", singleAsn) + else: + ## Add subnets to our dictionaries with + originAsnDict = {sbnets : singleAsn for sbnets in subnets} + ## This is what lets us append each loop to the final destDict + destDict.update(originAsnDict) + +## Open handle to output file +resultsCsv = open("pyAsnOutput.csv", "w") +# write to csv +writer = csv.writer(resultsCsv) +for key, value in destDict.items(): + writer.writerow([key, value]) + +## winner winner chicken dinner +#+END_SRC + +#+RESULTS: +#+begin_src python +None +#+end_src +* Push to bq (just testing) + +#+begin_src shell +bq load --autodetect k8s_artifacts_dataset_bb_test.py_asn_test /home/ii/foo/pyAsnOutput.csv +#+end_src + +#+RESULTS: +#+begin_example +It worked! 
+#+end_example +* Transform cidr to range using postgres +** Bring up Postgres +#+BEGIN_SRC tmate :window postgres +docker run -it --rm -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_DB=ii postgres:12.2-alpine +#+END_SRC +Get ip for pg +#+BEGIN_SRC shell +echo $SHARINGIO_PAIR_LOAD_BALANCER_IP +#+END_SRC + +#+RESULTS: +#+begin_example +147.75.109.30 +#+end_example + +#+BEGIN_SRC sql-mode +\dn +--SELECT schemaname, tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + List of schemas + Name | Owner +--------+---------- + public | postgres +(1 row) + +#+end_SRC + +** Load csv into pg +#+BEGIN_SRC sql-mode +create table pyasn_ip_asn (ip cidr, asn int); +\COPY pyasn_ip_asn from '/home/ii/foo/pyAsnOutput.csv' DELIMITER ',' CSV; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +CREATE TABLE +#+end_SRC + +Confirmation: +#+BEGIN_SRC sql-mode +select * from pyasn_ip_asn limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + ip | asn +------------------+----- + 8.13.230.128/27 | 1 + 63.215.96.16/29 | 1 + 8.13.232.96/27 | 1 + 207.227.228.0/22 | 1 + 8.44.88.192/29 | 1 + 8.13.231.128/27 | 1 + 63.215.98.16/29 | 1 + 8.45.87.176/29 | 1 + 8.13.231.32/27 | 1 + 8.13.227.32/27 | 1 +(10 rows) + +#+end_SRC + +** Split that into start and end +#+BEGIN_SRC sql-mode +select asn as asn, +ip as ip, +host(network(ip)::inet) as ip_start, +host(broadcast(ip)::inet) as ip_end +into table pyasn_ip_asn_extended +from pyasn_ip_asn; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +SELECT 923058 +#+end_SRC + +#+BEGIN_SRC sql-mode +select * from pyasn_ip_asn_extended limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + asn | ip | ip_start | ip_end +-----+------------------+---------------+----------------- + 1 | 8.13.230.128/27 | 8.13.230.128 | 8.13.230.159 + 1 | 63.215.96.16/29 | 63.215.96.16 | 63.215.96.23 + 1 | 8.13.232.96/27 | 8.13.232.96 | 8.13.232.127 + 1 | 207.227.228.0/22 | 
207.227.228.0 | 207.227.231.255 + 1 | 8.44.88.192/29 | 8.44.88.192 | 8.44.88.199 + 1 | 8.13.231.128/27 | 8.13.231.128 | 8.13.231.159 + 1 | 63.215.98.16/29 | 63.215.98.16 | 63.215.98.23 + 1 | 8.45.87.176/29 | 8.45.87.176 | 8.45.87.183 + 1 | 8.13.231.32/27 | 8.13.231.32 | 8.13.231.63 + 1 | 8.13.227.32/27 | 8.13.227.32 | 8.13.227.63 +(10 rows) + +#+end_SRC + +** Export to csv and upload to bq +#+begin_src sql-mode +\copy (select * from pyasn_ip_asn_extended) to '/tmp/pyasn_expanded_ipv4.csv' csv header; +#+end_src + +#+RESULTS: +#+begin_SRC example +COPY 923058 +#+end_SRC + +#+begin_src shell +bq load --autodetect k8s_artifacts_dataset_bb_test.pyasn_ip_asn_extended /tmp/pyasn_expanded_ipv4.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example +** Create table with asn, ips as ints +#+BEGIN_SRC shell +bq query --nouse_legacy_sql \ +' +SELECT + asn as asn, + ip as cidr_ip, + ip_start as start_ip, + ip_end as end_ip, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_start)) AS start_ip_int, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_end)) AS end_ip + from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended` + WHERE regexp_contains(ip_start, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"); +' +#+END_SRC diff --git a/research/rook/explore-rook-with-kubemacs-on-packet.org b/research/rook/explore-rook-with-kubemacs-on-packet.org new file mode 100644 index 0000000..162db48 --- /dev/null +++ b/research/rook/explore-rook-with-kubemacs-on-packet.org @@ -0,0 +1,410 @@ +#+TITLE: Explore Rook.io with Kubemacs on Packet +#+AUTHOR: Stephen Heywood +#+DATE: 17 March 2020 + + +* Overview + +Exploring [[https://rook.io/][rook.io]], a storage operator with [[https://github.com/kubemacs/kubemacs][kubemacs]] on [[https://www.packet.com/][Packet]]. +As Packet provides servers with extra disks it makes it easier to test out various storage configurations. 
+ +* References + +- [[https://rook.io/][https://rook.io/]] +- https://rook.io/docs/rook/v1.2/ceph-quickstart.html +- [[https://www.youtube.com/watch?v=pwVsFHy2EdE][Kubecon 2018 - Intro: Rook - Jared Watts, Upbound]] + +* Clone rook repo + + #+begin_src shell + git clone --single-branch --branch release-1.2 https://github.com/rook/rook.git + #+end_src + + #+RESULTS: + #+begin_src shell + #+end_src + +* Create common settings for the rook-ceph + + #+begin_src shell :dir ./rook/cluster/examples/kubernetes/ceph + kubectl create -f common.yaml + #+end_src + + #+RESULTS: + #+begin_src shell + namespace/rook-ceph created + customresourcedefinition.apiextensions.k8s.io/cephclusters.ceph.rook.io created + customresourcedefinition.apiextensions.k8s.io/cephclients.ceph.rook.io created + customresourcedefinition.apiextensions.k8s.io/cephfilesystems.ceph.rook.io created + customresourcedefinition.apiextensions.k8s.io/cephnfses.ceph.rook.io created + customresourcedefinition.apiextensions.k8s.io/cephobjectstores.ceph.rook.io created + customresourcedefinition.apiextensions.k8s.io/cephobjectstoreusers.ceph.rook.io created + customresourcedefinition.apiextensions.k8s.io/cephblockpools.ceph.rook.io created + customresourcedefinition.apiextensions.k8s.io/volumes.rook.io created + customresourcedefinition.apiextensions.k8s.io/objectbuckets.objectbucket.io created + customresourcedefinition.apiextensions.k8s.io/objectbucketclaims.objectbucket.io created + clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-object-bucket created + clusterrole.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created + clusterrole.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt-rules created + role.rbac.authorization.k8s.io/rook-ceph-system created + clusterrole.rbac.authorization.k8s.io/rook-ceph-global created + clusterrole.rbac.authorization.k8s.io/rook-ceph-global-rules created + clusterrole.rbac.authorization.k8s.io/rook-ceph-mgr-cluster created + 
clusterrole.rbac.authorization.k8s.io/rook-ceph-mgr-cluster-rules created + clusterrole.rbac.authorization.k8s.io/rook-ceph-object-bucket created + serviceaccount/rook-ceph-system created + rolebinding.rbac.authorization.k8s.io/rook-ceph-system created + clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-global created + serviceaccount/rook-ceph-osd created + serviceaccount/rook-ceph-mgr created + serviceaccount/rook-ceph-cmd-reporter created + role.rbac.authorization.k8s.io/rook-ceph-osd created + clusterrole.rbac.authorization.k8s.io/rook-ceph-osd created + clusterrole.rbac.authorization.k8s.io/rook-ceph-mgr-system created + clusterrole.rbac.authorization.k8s.io/rook-ceph-mgr-system-rules created + role.rbac.authorization.k8s.io/rook-ceph-mgr created + role.rbac.authorization.k8s.io/rook-ceph-cmd-reporter created + rolebinding.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created + rolebinding.rbac.authorization.k8s.io/rook-ceph-osd created + rolebinding.rbac.authorization.k8s.io/rook-ceph-mgr created + rolebinding.rbac.authorization.k8s.io/rook-ceph-mgr-system created + clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-mgr-cluster created + clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-osd created + rolebinding.rbac.authorization.k8s.io/rook-ceph-cmd-reporter created + podsecuritypolicy.policy/rook-privileged created + clusterrole.rbac.authorization.k8s.io/psp:rook created + clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-system-psp created + rolebinding.rbac.authorization.k8s.io/rook-ceph-default-psp created + rolebinding.rbac.authorization.k8s.io/rook-ceph-osd-psp created + rolebinding.rbac.authorization.k8s.io/rook-ceph-mgr-psp created + rolebinding.rbac.authorization.k8s.io/rook-ceph-cmd-reporter-psp created + serviceaccount/rook-csi-cephfs-plugin-sa created + serviceaccount/rook-csi-cephfs-provisioner-sa created + role.rbac.authorization.k8s.io/cephfs-external-provisioner-cfg created + 
rolebinding.rbac.authorization.k8s.io/cephfs-csi-provisioner-role-cfg created + clusterrole.rbac.authorization.k8s.io/cephfs-csi-nodeplugin created + clusterrole.rbac.authorization.k8s.io/cephfs-csi-nodeplugin-rules created + clusterrole.rbac.authorization.k8s.io/cephfs-external-provisioner-runner created + clusterrole.rbac.authorization.k8s.io/cephfs-external-provisioner-runner-rules created + clusterrolebinding.rbac.authorization.k8s.io/rook-csi-cephfs-plugin-sa-psp created + clusterrolebinding.rbac.authorization.k8s.io/rook-csi-cephfs-provisioner-sa-psp created + clusterrolebinding.rbac.authorization.k8s.io/cephfs-csi-nodeplugin created + clusterrolebinding.rbac.authorization.k8s.io/cephfs-csi-provisioner-role created + serviceaccount/rook-csi-rbd-plugin-sa created + serviceaccount/rook-csi-rbd-provisioner-sa created + role.rbac.authorization.k8s.io/rbd-external-provisioner-cfg created + rolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role-cfg created + clusterrole.rbac.authorization.k8s.io/rbd-csi-nodeplugin created + clusterrole.rbac.authorization.k8s.io/rbd-csi-nodeplugin-rules created + clusterrole.rbac.authorization.k8s.io/rbd-external-provisioner-runner created + clusterrole.rbac.authorization.k8s.io/rbd-external-provisioner-runner-rules created + clusterrolebinding.rbac.authorization.k8s.io/rook-csi-rbd-plugin-sa-psp created + clusterrolebinding.rbac.authorization.k8s.io/rook-csi-rbd-provisioner-sa-psp created + clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-nodeplugin created + clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role created + #+end_src + +* Create rook-ceph operator + + #+begin_src shell :dir ./rook/cluster/examples/kubernetes/ceph + kubectl create -f operator.yaml + #+end_src + + #+RESULTS: + #+begin_src shell + configmap/rook-ceph-operator-config created + deployment.apps/rook-ceph-operator created + #+end_src + +* List rook-ceph get pods + + #+begin_src shell + kubectl -n rook-ceph get pods + #+end_src + 
+ #+RESULTS: + #+begin_src shell + NAME READY STATUS RESTARTS AGE + rook-ceph-operator-69f856fc5f-g4zzn 1/1 Running 0 63s + rook-discover-fb4vw 1/1 Running 0 40s + #+end_src + +* List current rook-ceph resources + + #+begin_src shell + kubectl -n rook-ceph get all + #+end_src + + #+RESULTS: + #+begin_src shell + NAME READY STATUS RESTARTS AGE + pod/rook-ceph-operator-69f856fc5f-g4zzn 1/1 Running 0 4m45s + pod/rook-discover-fb4vw 1/1 Running 0 4m22s + + NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE + daemonset.apps/rook-discover 1 1 1 1 1 4m22s + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/rook-ceph-operator 1/1 1 1 4m45s + + NAME DESIRED CURRENT READY AGE + replicaset.apps/rook-ceph-operator-69f856fc5f 1 1 1 4m45s + #+end_src + +* Create rook-cluster.yaml + + #+begin_src yaml :tangle rook-cluster.yaml :noweb yes + apiVersion: ceph.rook.io/v1 + kind: CephCluster + metadata: + name: rook-ceph + namespace: rook-ceph + spec: + cephVersion: + image: ceph/ceph:v14.2.8 + allowUnsupported: false + dataDirHostPath: /var/lib/rook + mon: + count: 1 + allowMultiplePerNode: false + dashboard: + enabled: true + ssl: false + monitoring: + enabled: false # requires Prometheus to be pre-installed + rulesNamespace: rook-ceph + network: + hostNetwork: false + storage: + useAllNodes: true + useAllDevices: false + deviceFilter: "^sd[cd]" + #+end_src + +* Use rook-cluster.yaml + + #+begin_src shell + kubectl create -f ./rook-cluster.yaml + #+end_src + + #+RESULTS: + #+begin_src shell + cephcluster.ceph.rook.io/rook-ceph created + #+end_src + +* List current rook-ceph resources + + #+begin_src shell + kubectl -n rook-ceph get all + #+end_src + + #+RESULTS: + #+begin_src shell + NAME READY STATUS RESTARTS AGE + pod/csi-cephfsplugin-provisioner-7b8fbf88b4-7zll7 4/4 Running 0 94s + pod/csi-cephfsplugin-provisioner-7b8fbf88b4-hmv4z 4/4 Running 0 94s + pod/csi-cephfsplugin-z96rn 3/3 Running 0 94s + pod/csi-rbdplugin-ddbmn 3/3 Running 0 94s + 
pod/csi-rbdplugin-provisioner-6b8b4d558c-b42nf 5/5 Running 0 94s + pod/csi-rbdplugin-provisioner-6b8b4d558c-b6tlv 5/5 Running 0 94s + pod/rook-ceph-crashcollector-kubemacs-worker-7b7679c58b-f6kc7 1/1 Running 0 31s + pod/rook-ceph-mgr-a-56d9855b98-7qhj2 1/1 Running 0 31s + pod/rook-ceph-mon-a-5bfd5d7979-q4vqj 1/1 Running 0 41s + pod/rook-ceph-operator-69f856fc5f-g4zzn 1/1 Running 0 106m + pod/rook-ceph-osd-prepare-kubemacs-worker-vndgd 0/1 Completed 0 19s + pod/rook-discover-fb4vw 1/1 Running 0 106m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/csi-cephfsplugin-metrics ClusterIP 10.96.69.103 8080/TCP,8081/TCP 94s + service/csi-rbdplugin-metrics ClusterIP 10.96.179.159 8080/TCP,8081/TCP 94s + service/rook-ceph-mgr ClusterIP 10.96.84.236 9283/TCP 19s + service/rook-ceph-mgr-dashboard ClusterIP 10.96.152.194 7000/TCP 31s + service/rook-ceph-mon-a ClusterIP 10.96.121.192 6789/TCP,3300/TCP 42s + + NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE + daemonset.apps/csi-cephfsplugin 1 1 1 1 1 94s + daemonset.apps/csi-rbdplugin 1 1 1 1 1 94s + daemonset.apps/rook-discover 1 1 1 1 1 106m + + NAME READY UP-TO-DATE AVAILABLE AGE + deployment.apps/csi-cephfsplugin-provisioner 2/2 2 2 94s + deployment.apps/csi-rbdplugin-provisioner 2/2 2 2 94s + deployment.apps/rook-ceph-crashcollector-kubemacs-worker 1/1 1 1 41s + deployment.apps/rook-ceph-mgr-a 1/1 1 1 31s + deployment.apps/rook-ceph-mon-a 1/1 1 1 41s + deployment.apps/rook-ceph-operator 1/1 1 1 106m + + NAME DESIRED CURRENT READY AGE + replicaset.apps/csi-cephfsplugin-provisioner-7b8fbf88b4 2 2 2 94s + replicaset.apps/csi-rbdplugin-provisioner-6b8b4d558c 2 2 2 94s + replicaset.apps/rook-ceph-crashcollector-kubemacs-worker-6845485f44 0 0 0 41s + replicaset.apps/rook-ceph-crashcollector-kubemacs-worker-7b7679c58b 1 1 1 31s + replicaset.apps/rook-ceph-mgr-a-56d9855b98 1 1 1 31s + replicaset.apps/rook-ceph-mon-a-5bfd5d7979 1 1 1 41s + replicaset.apps/rook-ceph-operator-69f856fc5f 1 1 1 106m + + NAME 
COMPLETIONS DURATION AGE + job.batch/rook-ceph-osd-prepare-kubemacs-worker 1/1 10s 19s + #+end_src + +* Rook Toolbox + +- [[https://rook.io/docs/rook/v1.2/ceph-toolbox.html][https://rook.io/docs/rook/v1.2/ceph-toolbox.html]] + +** setup toolbox + + #+begin_src shell :dir ./rook/cluster/examples/kubernetes/ceph + kubectl create -f toolbox.yaml + #+end_src + + #+RESULTS: + #+begin_src shell + deployment.apps/rook-ceph-tools created + #+end_src + +** Report toolbox pod name + + #+begin_src shell + kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" + #+end_src + + #+RESULTS: + #+begin_src shell + NAME READY STATUS RESTARTS AGE + rook-ceph-tools-565698c784-dx94x 1/1 Running 0 7s + #+end_src + +** Once the rook-ceph-tools pod is running, you can connect to it with: + + #+begin_src tmate + kubectl -n rook-ceph exec \ + -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" \ + -o jsonpath='{.items[0].metadata.name}') bash + #+end_src + +** Check on Ceph Status + + #+begin_src shell + kubectl -n rook-ceph exec \ + -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" \ + -o jsonpath='{.items[0].metadata.name}') -- ceph status + #+end_src + + #+RESULTS: + #+begin_src shell + cluster: + id: 2bb0b570-1b5e-4c4e-b8ad-578154bc06ed + health: HEALTH_WARN + OSD count 0 < osd_pool_default_size 3 + + services: + mon: 1 daemons, quorum a (age 61m) + mgr: a(active, since 61m) + osd: 0 osds: 0 up, 0 in + + data: + pools: 0 pools, 0 pgs + objects: 0 objects, 0 B + usage: 0 B used, 0 B / 0 B avail + pgs: + + #+end_src + +** More examples + +- ceph status +- ceph osd status +- ceph df +- rados df + + #+begin_src shell + kubectl -n rook-ceph exec -it \ + $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') \ + -- ceph -h | grep -i list + #+end_src + + #+RESULTS: + #+begin_src shell + daemonperf {type.id | path} list|ls [stat-pats] [priority] + List shows a table of all available stats + auth ls list authentication state + balancer ls 
List all plans + balancer pool ls List automatic balancing pools. Note + that empty list means all existing + config ls List available configuration options + config-key ls list keys + dashboard iscsi-gateway-list List iSCSI gateways + fs ls list filesystems + fs subvolume ls { List subvolume snapshots + fs subvolumegroup ls List subvolumegroups + fs subvolumegroup snapshot ls [. List devices on a node + orchestrator host ls List hosts + orchestrator service ls {} {mon| List services known to orchestrator + osd blacklist add|rm add (optionally until seconds + blacklist + osd blacklist clear clear all blacklisted clients + osd blacklist ls show blacklisted clients + osd crush class ls list all crush device classes + osd crush class ls-osd list all osds belonging to the specific + osd crush ls list items beneath a node in the CRUSH + osd crush rule ls list crush rules + osd crush rule ls-by-class list all crush rules that reference the + osd crush weight-set ls list crush weight sets + osd erasure-code-profile ls list all erasure code profiles + osd pool ls {detail} list pools + pg ls {} { [...]} list pg with specific pool, osd, state + pg ls-by-osd list pg on osd [osd] + pg ls-by-pool { list pg with pool = [poolname] + pg ls-by-primary list pg with primary = [osd] + rbd task list {} List pending or running asynchronous + restful list-keys List all API keys + #+end_src + +** Remove Toolbox + + #+begin_src shell + kubectl -n rook-ceph delete deployment rook-ceph-tools + #+end_src + + #+RESULTS: + #+begin_src shell + deployment.apps "rook-ceph-tools" deleted + #+end_src + +* Ceph Dashboard + +- [[https://github.com/rook/rook/blob/master/Documentation/ceph-dashboard.md][https://github.com/rook/rook/blob/master/Documentation/ceph-dashboard.md]] + + + #+begin_src shell + kubectl -n rook-ceph get service + #+end_src + + #+RESULTS: + #+begin_src shell + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + csi-cephfsplugin-metrics ClusterIP 10.96.69.103 8080/TCP,8081/TCP 94m + 
csi-rbdplugin-metrics ClusterIP 10.96.179.159 8080/TCP,8081/TCP 94m + rook-ceph-mgr ClusterIP 10.96.84.236 9283/TCP 93m + rook-ceph-mgr-dashboard ClusterIP 10.96.152.194 7000/TCP 93m + rook-ceph-mon-a ClusterIP 10.96.121.192 6789/TCP,3300/TCP 93m + #+end_src + +** Auth + + #+begin_src shell + kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo + #+end_src + +* Storage Setup + +- [[https://rook.io/docs/rook/v1.2/ceph-examples.html][https://rook.io/docs/rook/v1.2/ceph-examples.html]] diff --git a/research/ruby_to_pull_bgp_asn.org b/research/ruby_to_pull_bgp_asn.org new file mode 100644 index 0000000..ac201d6 --- /dev/null +++ b/research/ruby_to_pull_bgp_asn.org @@ -0,0 +1,35 @@ +#+TITLE: Ruby To Pull Bgp Asn +Found the following repo that seems to poll and query asn data +https://github.com/codeout/transitive_bgp_community +* get the repo +EXEC_USER=${EXEC_USER:-root} +EXEC_PWD=${EXEC_PWD} +** Setup +#+BEGIN_SRC tmate :window ruby_setup +EXEC_USER=ii host-shell touch ruby123 +#+END_SRC + + +#+BEGIN_SRC tmate :window ruby_setup +ls -al /var/run/host/home/ii/ +#+END_SRC + + +** WIP + + +#+BEGIN_SRC tmate :window ruby_test :dir (getenv "HOME") +mkdir -p ruby_test; cd ruby_test +#+END_SRC + + +#+BEGIN_SRC tmate :window ruby_test :dir (concat (getenv "HOME") "/ruby_test") +EXEC_USER=ii EXEC_PWD=/home/ii/ruby_test host-shell git clone https://github.com/codeout/transitive_bgp_community +#+END_SRC + +#+BEGIN_SRC tmate :window ruby_container :dir (concat (getenv "HOME") "/ruby_test") +docker run -u 1000:1000 -d --rm -it -v /home/ii/ruby_test:/app ruby:2.4 /bin/bash +#+END_SRC +#+BEGIN_SRC tmate :window ruby_container :dir (concat (getenv "HOME") "/ruby_test") +docker exec -it ruby:2.4 /bin/bash +#+END_SRC diff --git a/research/shadowserver_asn.org b/research/shadowserver_asn.org new file mode 100644 index 0000000..d3d059e --- /dev/null +++ b/research/shadowserver_asn.org @@ -0,0 +1,187 @@ +#+TITLE: 
Shadowserver_asn +Goal is to get ASN to CIDR match data from https://www.shadowserver.org/ +They are a paid service, but with free api calls for under 5 calls per second +For this test I will stay under the free limit +If it works we can look at upgrading our plan or having the CNCF partner with them. +* Test the service +They have a very simple api and with +#+BEGIN_SRC shell +curl https://api.shadowserver.org/net/asn\?prefix\=84 +#+END_SRC + +#+RESULTS: +#+begin_example +["130.46.0.0/16","192.5.27.0/24","192.5.47.0/24","192.91.138.0/24","198.97.79.0/24"] +#+end_example + +* Find full list of ASN numbers +There are lots out there, https://www.iana.org/numbers distributes the numbers but I struggled to get one list from them. +Peeringdb has only about 10k entries +arin.net has about 20k entries, that seem to be the missing set from peeringdb +Best I could find is https://bgp.potaroo.net/cidr/autnums.html the page has amazing information +I am going to use that as my source of asn-company +TODO: clean up the curl to populate a csv we can use to upload to BQ +#+BEGIN_SRC shell +curl https://bgp.potaroo.net/cidr/autnums.html | tail +#+END_SRC + +#+RESULTS: +#+begin_example +AS399722 AS-MS-78065, US +AS399723 LYNXXNETWORKS01, US +AS399724 DIGICEL-STVINCENT, LC +AS401308 NOMAD-AIR, US + +
+File last modified at Mon May 31 18:14:41 2021 + (UTC+1000) + + +#+end_example + +* Lets marry ASN numbers with results from shadowserver +I realize this is imbarrising I can get the csv just with jq, but this worked for now +#+BEGIN_SRC tmate :window curl-asn +while IFS='' read -r LINE || [ -n "${LINE}" ]; do +sleep .2 | curl https://api.shadowserver.org/net/asn\?prefix\=$LINE | jq '.' | sed -e "s/^/\"$LINE\",/" | grep -E -v '\",\[|\",\]' >> /tmp/asnTest.csv +done < /home/ii/foo/asnNumbersOnly.txt +#+END_SRC + +* Transform cidr to range using postgres +** Bring up Postgres +#+BEGIN_SRC tmate :window postgres +docker run -it --rm -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_DB=ii postgres:12.2-alpine +#+END_SRC +#+BEGIN_SRC sql-mode +\dn +--SELECT schemaname, tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + List of schemas + Name | Owner +--------+---------- + public | postgres +(1 row) +#+end_SRC + +** Load csv into pg +#+BEGIN_SRC sql-mode +--create table shadowserver_ip_asn (ip cidr); +create table shadowserver_ip_asn (asn int, ip cidr); +\COPY shadowserver_ip_asn from '/tmp/asnTest.csv' DELIMITER ',' CSV; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +#+end_SRC + +Confirmation: +#+BEGIN_SRC sql-mode +select * from shadowserver_ip_asn limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + asn | ip +-----+------------------ + 1 | 12.51.30.0/24 + 1 | 12.52.182.0/24 + 1 | 91.227.30.0/24 + 1 | 205.207.214.0/24 + 1 | 212.94.84.0/22 + 2 | 12.35.70.0/23 + 2 | 31.129.245.0/24 + 2 | 91.143.144.0/20 + 2 | 103.77.60.0/24 + 2 | 103.77.61.0/24 +(10 rows) + +#+end_SRC + +Split that into start and end +#+BEGIN_SRC sql-mode +select asn as asn, +ip as ip, +host(network(ip)::inet) as ip_start, +host(broadcast(ip)::inet) as ip_end +into table shadow_ip_asn_extended +from shadowserver_ip_asn; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +SELECT 518099 
+#+end_SRC + +#+BEGIN_SRC sql-mode +select * from shadow_ip_asn_extended limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + asn | ip | ip_start | ip_end +-----+------------------+---------------+----------------- + 1 | 12.51.30.0/24 | 12.51.30.0 | 12.51.30.255 + 1 | 12.52.182.0/24 | 12.52.182.0 | 12.52.182.255 + 1 | 91.227.30.0/24 | 91.227.30.0 | 91.227.30.255 + 1 | 205.207.214.0/24 | 205.207.214.0 | 205.207.214.255 + 1 | 212.94.84.0/22 | 212.94.84.0 | 212.94.87.255 + 2 | 12.35.70.0/23 | 12.35.70.0 | 12.35.71.255 + 2 | 31.129.245.0/24 | 31.129.245.0 | 31.129.245.255 + 2 | 91.143.144.0/20 | 91.143.144.0 | 91.143.159.255 + 2 | 103.77.60.0/24 | 103.77.60.0 | 103.77.60.255 + 2 | 103.77.61.0/24 | 103.77.61.0 | 103.77.61.255 +(10 rows) + +#+end_SRC + +#+begin_src sql-mode +\copy (select * from shadow_ip_asn_extended) to '/tmp/shadow_expanded_ipv4.csv' csv header; +#+end_src + +#+RESULTS: +#+begin_SRC example +COPY 518099 +#+end_SRC + +#+begin_src shell +bq load --autodetect k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended /tmp/shadow_expanded_ipv4.csv +#+end_src + +#+RESULTS: +#+begin_example +#+end_example + +** Create table with asn, ips as ints +#+BEGIN_SRC shell +bq query --nouse_legacy_sql \ +' +SELECT + asn as asn, + ip as cidr_ip, + ip_start as start_ip, + ip_end as end_ip, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_start)) AS start_ip_int, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(ip_end)) AS end_ip + from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended` + WHERE regexp_contains(ip_start, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"); +' +#+END_SRC +** Run a lookup of cust ips against asn lookup +There are 4 million distinct ips +Right now this query fails, +Riaan will break the 4 million ips into smaller chunks and try again +#+BEGIN_SRC shell +bq query --nouse_legacy_sql \ +' +select asn, +c_ip, +from `k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.distinct_ip_with_count_of_ip_ipv4_only_int`, 
+`k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.shadow_ip_asn_extended_int` +where +c_ip_int >= start_ip_int and c_ip_int <= end_ip_1; +' + +#+END_SRC diff --git a/research/sharing.io/implementation.org b/research/sharing.io/implementation.org new file mode 100644 index 0000000..c3baf5a --- /dev/null +++ b/research/sharing.io/implementation.org @@ -0,0 +1,68 @@ +#+TITLE: Sharing.io Planning Docs +* Goal + Have a POC ready in one week that shows how we can create boxes, and clusters, for pairing super simply and elegantly. + This setup should be flexible enough that we could deploy envoy to a cluster and then create a pairing environment in this cluster with minimal steps. + This does not need to be stable enough to be used outside of our team, it is a proof of concept. +* Context/Current Setup + There is a program called syme that will create an aws machine with your and your invitees credentials on it, making it simple to ssh in and share a tmux session. + We forked this program, the fork is the repo we are in currently. + We also created a cluster-api object, and a setup shell script, that can make a machine and cluster on packet that has all our apisnoop/ii-testing related needs _and_ gets emacs up inside a tmate session. + + Our current plan is to customize syme to utilize this cluster-api object so that we can fill out a form with some invitees, and have them now in a shared environment on packet ready to work. +* Questions +** What work is required to create on packet with cluster-api v. AWS +Reworking 70% (or more) of the code, which happens to be in a single file (src/syme/instance.clj). + +** After that work is done, how much of syme remains? +A lot less + +** How new to us is this work? + Is it just adjusting some configs and some basic string manipulation to make stuff on packet instead? Or will we be writing new libraries in new languages? 
+ + We could potentially do a shell call that passes along some env var, but it's def bad practice...we'd be creating stateful containers and inviting messiness, and those may not be good if we are using this as a demo for other devops infra teams. + + To do this cleanly, it would require figuring out the java libraries for packet and kubernetes well enough to know how to transpose it into clojure (it wouldn't be a cut and paste but a translation) and then re-write all the instance functions to use our setup instead. This is an unknown unknown for the amount of work and time. + +** Is this work faster to implement in golang? + Yes. Go libraries for what we're trying to do (Cluster-API) already exist, unlike the Java -> Clojure equivalents. The core of syme would need to be rewritten, and the other stuff (like the oauth form) can be detached from the core instance-creating. In other words, we don't think we are bound to the entirety of syme and could potentially take what works well without having to rewrite it. + +** What are the pro/cons of syme v. new thing in golang? +*** Pro of using golang +- Cluster-API stuff will have 100% less cruft and will require no generating of a file which lands on the filesystem. +*** Con of using golang + - we would have to write everything, though it'd likely be less code and cleaner. +*** Pro of using syme + - it's already written + - clojure is elegant +*** cons + - the core part is not written, we'd still be having to do a lot of brand-new code + - there's a lot of transposing (Java->Clojure->Kubernetes) + - much of the new code would have to be written in java, which is not a comfortable language for any of us. +** Can we use both? + don't rewrite everything, but rewrite the hard stuff in go...like have our form post to a diff. server so that the logic is being handled in golang but everything else is already written? 
+ +Possibly, the state and stuff can be stored as a CRD in Kubernetes which is accessed through the golang portion. +The golang portion is responsible for managing the boxes via Cluster-API. +The clojure portion is responsible for OAuth and the UI. +This would potentially be the quickest solution, that lets us retain the most code and the new stuff can be written in a language with existing libraries for this exact usecase. + +* Notes +- using Cluster-API, there is a current known problem (at least within ii) for boxes not deleting when resources are being requested to be deleted + +* [1/1] Tasks +** DONE Answer these questions + CLOSED: [2020-09-28 Mon 16:37] + +** TODO Get sign off for hybrid approach +** TODO Create golang api server +*** TODO retention +- delete after 1 day +*** TODO limits +**** instances +- 2 max +**** misc +- IPs +** TODO Create golang instance creator +** TODO write syme /launch to post to golang api server +** TODO write syme /delete to post to golang api server +** TODO test and celebrate diff --git a/research/unify-controller-management.org b/research/unify-controller-management.org new file mode 100644 index 0000000..b0f582b --- /dev/null +++ b/research/unify-controller-management.org @@ -0,0 +1,189 @@ +#+TITLE: Unify Controller Management + +* Running the management container locally in Podman +#+begin_src tmate :window unify +podman run \ + -it \ + --rm \ + -v $HOME/work/ii/unify-controller:/config:Z \ + -e PUID=1000 \ + -e PGID=1000 \ + -e TZ=Pacific/Auckland \ + --network host \ + --name unify-controller \ + lscr.io/linuxserver/unifi-controller:6.5.54 +#+end_src + +* Deploy in Kubernetes +#+begin_src yaml :tangle ./unify-controller.yaml +--- +apiVersion: v1 +kind: Namespace +metadata: + name: unify-controller +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: unify-controller + namespace: unify-controller +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Mi + storageClassName: 
local-path +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: unify-controller + namespace: unify-controller +spec: + strategy: + type: Recreate + selector: + matchLabels: + app: unify-controller + template: + metadata: + labels: + app: unify-controller + spec: + hostNetwork: true + containers: + - name: unify-controller + image: lscr.io/linuxserver/unifi-controller:6.5.54 + ports: + - containerPort: 8443 + env: + - name: TZ + value: Pacific/Auckland + - name: PUID + value: "1000" + - name: PGID + value: "1000" + volumeMounts: + - name: unify-controller + mountPath: /config + volumes: + - name: unify-controller + persistentVolumeClaim: + claimName: unify-controller +--- +apiVersion: v1 +kind: Service +metadata: + name: unify-controller + namespace: unify-controller +spec: + type: ClusterIP + ports: + - port: 8443 + targetPort: 8443 + protocol: TCP + selector: + app: unify-controller +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/whitelist-source-range: 192.168.1.0/24 + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + name: unify-controller + namespace: unify-controller +spec: + rules: + - host: unify.hackbach.nz + http: + paths: + - backend: + service: + name: unify-controller + port: + number: 8443 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - unify.hackbach.nz + secretName: letsencrypt-prod +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod-hackbach +spec: + acme: + email: hackbach-nz@ii.coop + privateKeySecretRef: + name: letsencrypt-prod + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - http01: + ingress: + class: nginx +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod-hackbach + namespace: unify-controller +spec: + dnsNames: + - unify.hackbach.nz + issuerRef: + kind: ClusterIssuer + name: letsencrypt-prod-hackbach + 
secretName: letsencrypt-prod +#+end_src + +Install Unify-Controller +#+begin_src shell +kubectl apply -f ./unify-controller.yaml +#+end_src + +#+RESULTS: +#+begin_example +namespace/unify-controller unchanged +persistentvolumeclaim/unify-controller unchanged +deployment.apps/unify-controller unchanged +service/unify-controller unchanged +ingress.networking.k8s.io/unify-controller configured +clusterissuer.cert-manager.io/letsencrypt-prod-hackbach unchanged +certificate.cert-manager.io/letsencrypt-prod-hackbach configured +#+end_example + +* Resetting a password +For some reason the Unify Controller container exposes an insecure MongoDB port for accessing the database. + +** Installing MongoDB cli +Tangle in the repo file +#+begin_src conf :tangle /tmp/mongodb-org-4.4.repo +[Mongodb] +name=MongoDB Repository +baseurl=https://repo.mongodb.org/yum/redhat/8/mongodb-org/4.4/x86_64/ +gpgcheck=1 +enabled=1 +gpgkey=https://www.mongodb.org/static/pgp/server-4.4.asc +#+end_src + +Copy the repo file to the repos folder +#+begin_src shell +sudo cp /tmp/mongodb-org-4.4.repo /etc/yum.repos.d/. +#+end_src + +Install MongoDB +#+begin_src shell +sudo dnf install -y mongodb-org +#+end_src + +** Performing the reset +The following command runs against Unify Controller MongoDB a command, in the ace db, under the admin collection, to set the x_shadow field for the password; setting it to a hash of password123. 
+#+begin_src shell +MONGODB_IP=localhost +mongo $MONGODB_IP:27117/ace < <(echo 'db.admin.update({"name": "hackbach"},{$set:{"x_shadow":"$6$OzJJ0heL$XyD5qt4pviLieuj8CMFbnSc9VYvxDyzYpH7dHC8wmaLwKv9xwyDxBiMx3GcT8nEdIa7XJbqlZo39jhfbQBXRM/"}} )') +#+end_src diff --git a/research/unify-controller.yaml b/research/unify-controller.yaml new file mode 100644 index 0000000..0c5b566 --- /dev/null +++ b/research/unify-controller.yaml @@ -0,0 +1,129 @@ +# Deploy in Kubernetes + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: unify-controller +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: unify-controller + namespace: unify-controller +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Mi + storageClassName: local-path +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: unify-controller + namespace: unify-controller +spec: + strategy: + type: Recreate + selector: + matchLabels: + app: unify-controller + template: + metadata: + labels: + app: unify-controller + spec: + hostNetwork: true + containers: + - name: unify-controller + image: lscr.io/linuxserver/unifi-controller:6.5.54 + # image: alpine:3.14 + # command: + # - sleep + # - infinity + ports: + - containerPort: 8443 + env: + - name: TZ + value: Pacific/Auckland + - name: PUID + value: "1000" + - name: PGID + value: "1000" + volumeMounts: + - name: unify-controller + mountPath: /config + volumes: + - name: unify-controller + persistentVolumeClaim: + claimName: unify-controller +--- +apiVersion: v1 +kind: Service +metadata: + name: unify-controller + namespace: unify-controller +spec: + type: ClusterIP + ports: + - port: 8443 + targetPort: 8443 + protocol: TCP + selector: + app: unify-controller +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/whitelist-source-range: 192.168.1.0/24 + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + name: unify-controller 
+ namespace: unify-controller +spec: + rules: + - host: unify.hackbach.nz + http: + paths: + - backend: + service: + name: unify-controller + port: + number: 8443 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - unify.hackbach.nz + secretName: letsencrypt-prod +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod-hackbach +spec: + acme: + email: hackbach-nz@ii.coop + privateKeySecretRef: + name: letsencrypt-prod + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + - http01: + ingress: + class: nginx +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: letsencrypt-prod-hackbach + namespace: unify-controller +spec: + dnsNames: + - unify.hackbach.nz + issuerRef: + kind: ClusterIssuer + name: letsencrypt-prod-hackbach + secretName: letsencrypt-prod diff --git a/research/which_cloud_ip_lookup.org b/research/which_cloud_ip_lookup.org new file mode 100644 index 0000000..86352fc --- /dev/null +++ b/research/which_cloud_ip_lookup.org @@ -0,0 +1,329 @@ +#+TITLE: Which Cloud Ip Lookup +Investigation of https://pypi.org/project/which-cloud/#files +Based on https://github.com/SoundOn/which-cloud +* Get the repo +#+BEGIN_SRC shell +git clone https://github.com/SoundOn/which-cloud.git +#+END_SRC + +* Look at json repo points to: +For the big 3 the repo points to 3 locations for json: +GCP-document: https://cloud.google.com/compute/docs/faq#find_ip_range +GCP-data: https://www.gstatic.com/ipranges/cloud.json +AWS-document: https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html +AWS-data: https://ip-ranges.amazonaws.com/ip-ranges.json +Azure-document: https://www.microsoft.com/en-us/download/details.aspx?id=56519 +Azure-data: https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_20201109.json +** Look at GCP json +###+BEGIN_SRC tmate :window SHELL + +#+BEGIN_SRC shell +curl 'https://www.gstatic.com/ipranges/cloud.json' | jq '.' 
| head +#+END_SRC + +#+RESULTS: +#+begin_example +{ + "syncToken": "1622048579440", + "creationTime": "2021-05-26T10:02:59.44", + "prefixes": [ + { + "ipv4Prefix": "34.80.0.0/15", + "service": "Google Cloud", + "scope": "asia-east1" + }, + { +#+end_example + +** Look at AWS json +###+BEGIN_SRC tmate :window SHELL + +#+BEGIN_SRC shell +curl 'https://ip-ranges.amazonaws.com/ip-ranges.json' | jq '.' | head +#+END_SRC + +#+RESULTS: +#+begin_example +{ + "syncToken": "1622066052", + "createDate": "2021-05-26-21-54-12", + "prefixes": [ + { + "ip_prefix": "3.5.140.0/22", + "region": "ap-northeast-2", + "service": "AMAZON", + "network_border_group": "ap-northeast-2" + }, +#+end_example + +** Look at AWS json +###+BEGIN_SRC tmate :window SHELL +###+BEGIN_SRC shell +Note this url seems to change by date, make sure any automation accounts for it +#+BEGIN_SRC shell +curl 'https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_20210524.json' | jq '.' | head +#+END_SRC + +#+RESULTS: +#+begin_example +{ + "changeNumber": 149, + "cloud": "Public", + "values": [ + { + "name": "ActionGroup", + "id": "ActionGroup", + "properties": { + "changeNumber": 9, + "region": "", +#+end_example + +* Push to bq +Login +#+BEGIN_SRC tmate gcloud-auth +gcloud auth login +#+END_SRC +Set project +#+BEGIN_SRC tmate gcloud-auth +gcloud config set project k8s-infra-ii-sandbox +#+END_SRC +Confirm +#+begin_src shell +gcloud config list --format 'value(core.project)' 2>/dev/null +#+end_src +#+RESULTS: +#+begin_example +k8s-infra-ii-sandbox +#+end_example + +** Get google cloud.json local and load it into bq --FAILED +Simple local download for now +#+BEGIN_SRC shell +wget https://www.gstatic.com/ipranges/cloud.json +#+END_SRC +#+BEGIN_SRC shell +ls -al | grep cloud.json +#+END_SRC +Confirm +#+RESULTS: +#+begin_example +-rw-r--r-- 1 ii ii 43248 May 27 05:08 cloud.json +#+end_example +Load into bq +mmmm it is getting errors... 
before I troubleshoot, let me try the same with aws +#+begin_src tmate :window bq-load +bq load --autodetect k8s_artifacts_dataset_bb_test.gcloud_ipranges_json ./cloud.json +#+end_src +#+BEGIN_SRC shell +wget https://ip-ranges.amazonaws.com/ip-ranges.json +#+END_SRC + +#+BEGIN_SRC shell +ls -al | grep ip-ranges +#+END_SRC +#+RESULTS: +#+begin_example +-rw-r--r-- 1 ii ii 846881 May 27 10:58 ip-ranges.json +#+end_example +#+begin_src tmate :window bq-load +bq load --autodetect k8s_artifacts_dataset_bb_test.amazon_ipranges_json ./ip-ranges.json +#+end_src +** Load the data in postgres for pre-processing +Bring up Postgres +#+BEGIN_SRC tmate :window postgres +docker run -it --rm -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_DB=ii postgres:12.2-alpine +#+END_SRC +#+BEGIN_SRC tmate :window pg-load :dir (concat (getenv "HOME") "") +mkdir json_dumps && cd json_dumps +#+END_SRC +#+BEGIN_SRC tmate :window pg-load :dir (concat (getenv "HOME") "/json_dumps") +wget 'https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_20210524.json' && +wget 'https://ip-ranges.amazonaws.com/ip-ranges.json' && +wget 'https://www.gstatic.com/ipranges/cloud.json' +#+END_SRC + +Confirm we can connect: +#+BEGIN_SRC sql-mode +\dn +--SELECT schemaname, tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema'; +#+END_SRC + +Lets load some data into postgres +Lets put down a sql file that will let us load the data +#+BEGIN_SRC sql :tangle (concat (getenv "HOME") "/json_dumps/dump_loads.sql") +-- create table jsonraw(data jsonb); +\copy jsonraw(data) from '/home/ii/json_dumps/cloud.json' csv quote e'\x01' delimiter e'\x02'; +#+END_SRC +Lets see what we get +#+BEGIN_SRC tmate :window pg-load :dir (concat (getenv "HOME") "/json_dumps") +psql -U postgres -d ii -h $SHARINGIO_PAIR_LOAD_BALANCER_IP < dump_loads.sql +#+END_SRC +Nope, here is a direct run to document the error. 
+#+BEGIN_SRC sql-mode + \copy jsonraw(data) from '/home/ii/json_dumps/cloud.json' csv quote e'\x01' delimiter e'\x02'; +#+END_SRC + + +Next I need to figure out what is wrong with my json, +Below I did a quick poke around with jq and I can navigate elements +That means the json is valid? + +Guessing it is some convention, will poke around more later + +#+BEGIN_SRC shell +cat /home/ii/json_dumps/cloud.json | jq '.prefixes[0].scope' | head +#+END_SRC + +#+RESULTS: +#+begin_example +"asia-east1" +#+end_example + +** Decision, I am going to output jsut the addresses and upload those to pg, done futsing with pg/bq parsing +Lets go make sure we can get to just ips +#+BEGIN_SRC tmate :window pg-load :dir (concat (getenv "HOME") "/json_dumps") +curl https://ip-ranges.amazonaws.com/ip-ranges.json | jq '.prefixes[].ip_prefix' > amazon_ipranges_only.txt +#+END_SRC +*** Turns out it is not needed to convert it to csv, still interesting: +Making it into a csv +From https://stackoverflow.com/questions/1251999/sed-how-can-i-replace-a-newline-n : +- Create a label via :a +- Append the current and next line to the pattern space via N +- If we are before the last line, branch to the created label $!ba ($! means not to do it on the last line (as there should be one final newline)). +- Finally the substitution replaces every newline with a comma on the pattern space (which is the whole file). 
+#+BEGIN_SRC tmate :window pg-load :dir (concat (getenv "HOME") "/json_dumps") +sed -i ':a;N;$!ba;s/\n/,/g' amazon_ipranges_only.txt +#+END_SRC +Remove all quotes +#+BEGIN_SRC tmate :window pg-load :dir (concat (getenv "HOME") "/json_dumps") +sed -i 's/"//g' amazon_ipranges_only.txt +#+END_SRC +*** Load amazon data to single table +#+BEGIN_SRC sql-mode +create table amazon_ips_only (ip cidr); +\COPY amazon_ips_only from '/home/ii/json_dumps/amazon_ipranges_only.txt' DELIMITER ',' CSV; +#+END_SRC +Confirmation: +#+BEGIN_SRC sql-mode +select * from amazon_ips_only limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + ip +------------------ + 3.5.140.0/22 + 13.34.37.64/27 + 15.230.56.104/31 + 35.180.0.0/16 + 52.93.153.170/32 + 52.93.178.234/32 + 52.94.76.0/22 + 52.95.36.0/22 + 52.219.170.0/23 + 99.87.32.0/22 +(10 rows) + +#+end_SRC + +Split that into start and end +#+BEGIN_SRC sql-mode +select ip as ip, +host(network(ip)::inet) as ip_start, +host(broadcast(ip)::inet) as ip_end +into table expanded_ip3 +from amazon_ips_only; +#+END_SRC + +#+RESULTS: +#+begin_SRC example +SELECT 4541 +#+end_SRC + +#+BEGIN_SRC sql-mode +select * from expanded_ip3 limit 10; +#+END_SRC + +#+RESULTS: +#+begin_SRC example + ip | ip_start | ip_end +------------------+---------------+---------------- + 3.5.140.0/22 | 3.5.140.0 | 3.5.143.255 + 13.34.37.64/27 | 13.34.37.64 | 13.34.37.95 + 15.230.56.104/31 | 15.230.56.104 | 15.230.56.105 + 35.180.0.0/16 | 35.180.0.0 | 35.180.255.255 + 52.93.153.170/32 | 52.93.153.170 | 52.93.153.170 + 52.93.178.234/32 | 52.93.178.234 | 52.93.178.234 + 52.94.76.0/22 | 52.94.76.0 | 52.94.79.255 + 52.95.36.0/22 | 52.95.36.0 | 52.95.39.255 + 52.219.170.0/23 | 52.219.170.0 | 52.219.171.255 + 99.87.32.0/22 | 99.87.32.0 | 99.87.35.255 +(10 rows) + +#+end_SRC + +#+begin_src sql-mode +\copy (select * from expanded_ip3) to '~/amazon_expanded_ipv4.csv' csv header; +#+end_src + +#+RESULTS: +#+begin_SRC example +COPY 4541 +#+end_SRC + +#+begin_src shell +bq load 
--autodetect k8s_artifacts_dataset_bb_test.amazon_initial_expanded_ipv4 /home/ii/amazon_expanded_ipv4.csv +#+end_src + +#+BEGIN_SRC shell +bq query --nouse_legacy_sql \ +' +select * from k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.amazon_initial_expanded_ipv4 limit 10; +' +#+END_SRC + +#+RESULTS: +#+begin_example ++----------------+----------------+----------------+ +| string_field_0 | string_field_1 | string_field_2 | ++----------------+----------------+----------------+ +| ip | ip_start | ip_end | +| 3.0.5.32/29 | 3.0.5.32 | 3.0.5.39 | +| 3.4.2.0/27 | 3.4.2.0 | 3.4.2.31 | +| 3.4.2.0/27 | 3.4.2.0 | 3.4.2.31 | +| 3.2.0.0/24 | 3.2.0.0 | 3.2.0.255 | +| 3.2.0.0/24 | 3.2.0.0 | 3.2.0.255 | +| 3.2.2.0/24 | 3.2.2.0 | 3.2.2.255 | +| 3.2.2.0/24 | 3.2.2.0 | 3.2.2.255 | +| 3.2.3.0/24 | 3.2.3.0 | 3.2.3.255 | +| 3.2.3.0/24 | 3.2.3.0 | 3.2.3.255 | ++----------------+----------------+----------------+ +#+end_example + +#+BEGIN_SRC shell +bq query --nouse_legacy_sql \ +' +SELECT + "amazon" as name, + string_field_0 as cidr_ip, + string_field_1 as start_ip, + string_field_2 end_ip, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(string_field_1)) AS start_ip_int, + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(string_field_2)) AS end_ip + from k8s-infra-ii-sandbox.k8s_artifacts_dataset_bb_test.amazon_initial_expanded_ipv4 + WHERE regexp_contains(string_field_1, r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}") limit 10; +' +#+END_SRC + +#+RESULTS: +#+begin_example +#+end_example + +#+BEGIN_SRC tmate :window pg-load :dir (concat (getenv "HOME") "/json_dumps") +#+END_SRC + +#+BEGIN_SRC tmate :window pg-load :dir (concat (getenv "HOME") "/json_dumps") +#+END_SRC + + NET.IPV4_TO_INT64(NET.IP_FROM_STRING(end_ip)) AS end_ip +FROM k8s-infra-ii-sandbox.k8s_artifacts_gcslogs_appspot.peeringdb_expanded_ipv4_20210524 diff --git a/revealjs/.github/CONTRIBUTING.md b/revealjs/.github/CONTRIBUTING.md new file mode 100644 index 0000000..bd0fab2 --- /dev/null +++ b/revealjs/.github/CONTRIBUTING.md @@ -0,0 
+1,21 @@ +## Contributing +Please keep the [issue tracker](https://github.com/hakimel/reveal.js/issues) limited to **bug reports**. + + +### General Questions and Support +If you have questions about how to use reveal.js the best place to ask is in the [Discussions](https://github.com/hakimel/reveal.js/discussions). Anything that isn't a bug report should be posted as a discussion instead. + + +### Bug Reports +When reporting a bug make sure to include information about which browser and operating system you are on as well as the necessary steps to reproduce the issue. If possible please include a link to a sample presentation where the bug can be tested. + + +### Pull Requests +- Should be submitted from a feature/topic branch (not your master) +- Should follow the coding style of the file you work in, most importantly: + - Tabs to indent + - Single-quoted strings + + +### Plugins +Please do not submit plugins as pull requests. They should be maintained in their own separate repository. 
More information here: https://github.com/hakimel/reveal.js/wiki/Plugin-Guidelines diff --git a/revealjs/.github/FUNDING.yml b/revealjs/.github/FUNDING.yml new file mode 100644 index 0000000..972831e --- /dev/null +++ b/revealjs/.github/FUNDING.yml @@ -0,0 +1 @@ +github: [hakimel] diff --git a/revealjs/.github/workflows/js.yml b/revealjs/.github/workflows/js.yml new file mode 100644 index 0000000..af86bea --- /dev/null +++ b/revealjs/.github/workflows/js.yml @@ -0,0 +1,27 @@ +name: tests + +on: [push] + +permissions: + contents: read + +jobs: + build: + + runs-on: ubuntu-latest + + strategy: + matrix: + node-version: [14.x] + + steps: + - uses: actions/checkout@v2 + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v1 + with: + node-version: ${{ matrix.node-version }} + - run: npm install + - run: npm run build --if-present + - run: npm test + env: + CI: true diff --git a/revealjs/.gitignore b/revealjs/.gitignore new file mode 100644 index 0000000..ba5aa84 --- /dev/null +++ b/revealjs/.gitignore @@ -0,0 +1,11 @@ +.idea/ +*.iml +*.iws +*.eml +out/ +.DS_Store +.svn +log/*.log +tmp/** +node_modules/ +.sass-cache \ No newline at end of file diff --git a/revealjs/.npmignore b/revealjs/.npmignore new file mode 100644 index 0000000..50c12b9 --- /dev/null +++ b/revealjs/.npmignore @@ -0,0 +1,7 @@ +/test +/examples +.github +.gulpfile +.sass-cache +gulpfile.js +CONTRIBUTING.md \ No newline at end of file diff --git a/revealjs/LICENSE b/revealjs/LICENSE new file mode 100644 index 0000000..0de9fdd --- /dev/null +++ b/revealjs/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2011-2023 Hakim El Hattab, http://hakim.se, and reveal.js contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of 
the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/revealjs/README.md b/revealjs/README.md new file mode 100644 index 0000000..db584dc --- /dev/null +++ b/revealjs/README.md @@ -0,0 +1,50 @@ +

+ + reveal.js + +

+ + Slides +

+ +reveal.js is an open source HTML presentation framework. It enables anyone with a web browser to create beautiful presentations for free. Check out the live demo at [revealjs.com](https://revealjs.com/). + +The framework comes with a powerful feature set including [nested slides](https://revealjs.com/vertical-slides/), [Markdown support](https://revealjs.com/markdown/), [Auto-Animate](https://revealjs.com/auto-animate/), [PDF export](https://revealjs.com/pdf-export/), [speaker notes](https://revealjs.com/speaker-view/), [LaTeX typesetting](https://revealjs.com/math/), [syntax highlighted code](https://revealjs.com/code/) and an [extensive API](https://revealjs.com/api/). + +--- + +Want to create reveal.js presentation in a graphical editor? Try . It's made by the same people behind reveal.js. + +--- + +### Sponsors +Hakim's open source work is supported by GitHub sponsors. Special thanks to: + + +--- + +### Getting started +- 🚀 [Install reveal.js](https://revealjs.com/installation) +- 👀 [View the demo presentation](https://revealjs.com/demo) +- 📖 [Read the documentation](https://revealjs.com/markup/) +- 🖌 [Try the visual editor for reveal.js at Slides.com](https://slides.com/) +- 🎬 [Watch the reveal.js video course (paid)](https://revealjs.com/course) + +--- +
+ MIT licensed | Copyright © 2011-2023 Hakim El Hattab, https://hakim.se +
diff --git a/revealjs/css/layout.scss b/revealjs/css/layout.scss new file mode 100644 index 0000000..f499fdd --- /dev/null +++ b/revealjs/css/layout.scss @@ -0,0 +1,69 @@ +/** + * Layout helpers. + */ + +// Stretch an element vertically based on available space +.reveal .stretch, +.reveal .r-stretch { + max-width: none; + max-height: none; +} + +.reveal pre.stretch code, +.reveal pre.r-stretch code { + height: 100%; + max-height: 100%; + box-sizing: border-box; +} + +// Text that auto-fits its container +.reveal .r-fit-text { + display: inline-block; // https://github.com/rikschennink/fitty#performance + white-space: nowrap; +} + +// Stack multiple elements on top of each other +.reveal .r-stack { + display: grid; +} + +.reveal .r-stack > * { + grid-area: 1/1; + margin: auto; +} + +// Horizontal and vertical stacks +.reveal .r-vstack, +.reveal .r-hstack { + display: flex; + + img, video { + min-width: 0; + min-height: 0; + object-fit: contain; + } +} + +.reveal .r-vstack { + flex-direction: column; + align-items: center; + justify-content: center; +} + +.reveal .r-hstack { + flex-direction: row; + align-items: center; + justify-content: center; +} + +// Naming based on tailwindcss +.reveal .items-stretch { align-items: stretch; } +.reveal .items-start { align-items: flex-start; } +.reveal .items-center { align-items: center; } +.reveal .items-end { align-items: flex-end; } + +.reveal .justify-between { justify-content: space-between; } +.reveal .justify-around { justify-content: space-around; } +.reveal .justify-start { justify-content: flex-start; } +.reveal .justify-center { justify-content: center; } +.reveal .justify-end { justify-content: flex-end; } diff --git a/revealjs/css/print/paper.scss b/revealjs/css/print/paper.scss new file mode 100644 index 0000000..32fab8a --- /dev/null +++ b/revealjs/css/print/paper.scss @@ -0,0 +1,166 @@ + +@media print { + html:not(.print-pdf) { + overflow: visible; + width: auto; + height: auto; + + body { + margin: 0; + 
padding: 0; + overflow: visible; + } + } + + html:not(.print-pdf) .reveal { + background: #fff; + font-size: 20pt; + + .controls, + .state-background, + .progress, + .backgrounds, + .slide-number { + display: none !important; + } + + p, td, li { + font-size: 20pt!important; + color: #000; + } + + h1,h2,h3,h4,h5,h6 { + color: #000!important; + height: auto; + line-height: normal; + text-align: left; + letter-spacing: normal; + } + + h1 { font-size: 28pt !important; } + h2 { font-size: 24pt !important; } + h3 { font-size: 22pt !important; } + h4 { font-size: 22pt !important; font-variant: small-caps; } + h5 { font-size: 21pt !important; } + h6 { font-size: 20pt !important; font-style: italic; } + + a:link, + a:visited { + color: #000 !important; + font-weight: bold; + text-decoration: underline; + } + + ul, ol, div, p { + visibility: visible; + position: static; + width: auto; + height: auto; + display: block; + overflow: visible; + margin: 0; + text-align: left !important; + } + pre, + table { + margin-left: 0; + margin-right: 0; + } + pre code { + padding: 20px; + } + blockquote { + margin: 20px 0; + } + + .slides { + position: static !important; + width: auto !important; + height: auto !important; + + left: 0 !important; + top: 0 !important; + margin-left: 0 !important; + margin-top: 0 !important; + padding: 0 !important; + zoom: 1 !important; + transform: none !important; + + overflow: visible !important; + display: block !important; + + text-align: left !important; + perspective: none; + + perspective-origin: 50% 50%; + } + .slides section { + visibility: visible !important; + position: static !important; + width: auto !important; + height: auto !important; + display: block !important; + overflow: visible !important; + + left: 0 !important; + top: 0 !important; + margin-left: 0 !important; + margin-top: 0 !important; + padding: 60px 20px !important; + z-index: auto !important; + + opacity: 1 !important; + + page-break-after: always !important; + + 
transform-style: flat !important; + transform: none !important; + transition: none !important; + } + .slides section.stack { + padding: 0 !important; + } + .slides section:last-of-type { + page-break-after: avoid !important; + } + .slides section .fragment { + opacity: 1 !important; + visibility: visible !important; + + transform: none !important; + } + + .r-fit-text { + white-space: normal !important; + } + + section img { + display: block; + margin: 15px 0px; + background: rgba(255,255,255,1); + border: 1px solid #666; + box-shadow: none; + } + + section small { + font-size: 0.8em; + } + + .hljs { + max-height: 100%; + white-space: pre-wrap; + word-wrap: break-word; + word-break: break-word; + font-size: 15pt; + } + + .hljs .hljs-ln-numbers { + white-space: nowrap; + } + + .hljs td { + font-size: inherit !important; + color: inherit !important; + } + } +} diff --git a/revealjs/css/print/pdf.scss b/revealjs/css/print/pdf.scss new file mode 100644 index 0000000..6113810 --- /dev/null +++ b/revealjs/css/print/pdf.scss @@ -0,0 +1,155 @@ +/** + * This stylesheet is used to print reveal.js + * presentations to PDF. + * + * https://revealjs.com/pdf-export/ + */ + +html.print-pdf { + * { + -webkit-print-color-adjust: exact; + } + + & { + width: 100%; + height: 100%; + overflow: visible; + } + + body { + margin: 0 auto !important; + border: 0; + padding: 0; + float: none !important; + overflow: visible; + } + + /* Remove any elements not needed in print. 
*/ + .nestedarrow, + .reveal .controls, + .reveal .progress, + .reveal .playback, + .reveal.overview, + .state-background { + display: none !important; + } + + .reveal pre code { + overflow: hidden !important; + font-family: Courier, 'Courier New', monospace !important; + } + + .reveal { + width: auto !important; + height: auto !important; + overflow: hidden !important; + } + .reveal .slides { + position: static; + width: 100% !important; + height: auto !important; + zoom: 1 !important; + pointer-events: initial; + + left: auto; + top: auto; + margin: 0 !important; + padding: 0 !important; + + overflow: visible; + display: block; + + perspective: none; + perspective-origin: 50% 50%; + } + + .reveal .slides .pdf-page { + position: relative; + overflow: hidden; + z-index: 1; + + page-break-after: always; + } + + .reveal .slides section { + visibility: visible !important; + display: block !important; + position: absolute !important; + + margin: 0 !important; + padding: 0 !important; + box-sizing: border-box !important; + min-height: 1px; + + opacity: 1 !important; + + transform-style: flat !important; + transform: none !important; + } + + .reveal section.stack { + position: relative !important; + margin: 0 !important; + padding: 0 !important; + page-break-after: avoid !important; + height: auto !important; + min-height: auto !important; + } + + .reveal img { + box-shadow: none; + } + + /* Slide backgrounds are placed inside of their slide when exporting to PDF */ + .reveal .backgrounds { + display: none; + } + .reveal .slide-background { + display: block !important; + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: auto !important; + } + + /* Display slide speaker notes when 'showNotes' is enabled */ + .reveal.show-notes { + max-width: none; + max-height: none; + } + .reveal .speaker-notes-pdf { + display: block; + width: 100%; + height: auto; + max-height: none; + top: auto; + right: auto; + bottom: auto; + left: auto; + z-index: 
100; + } + + /* Layout option which makes notes appear on a separate page */ + .reveal .speaker-notes-pdf[data-layout="separate-page"] { + position: relative; + color: inherit; + background-color: transparent; + padding: 20px; + page-break-after: always; + border: 0; + } + + /* Display slide numbers when 'slideNumber' is enabled */ + .reveal .slide-number-pdf { + display: block; + position: absolute; + font-size: 14px; + } + + /* This accessibility tool is not useful in PDF and breaks it visually */ + .aria-status { + display: none; + } +} diff --git a/revealjs/css/reveal.scss b/revealjs/css/reveal.scss new file mode 100644 index 0000000..6f741e0 --- /dev/null +++ b/revealjs/css/reveal.scss @@ -0,0 +1,1869 @@ +@use "sass:math"; + +/** + * reveal.js + * http://revealjs.com + * MIT licensed + * + * Copyright (C) Hakim El Hattab, https://hakim.se + */ + +@import 'layout'; + +/********************************************* + * GLOBAL STYLES + *********************************************/ + +html.reveal-full-page { + width: 100%; + height: 100%; + height: 100vh; + height: calc( var(--vh, 1vh) * 100 ); + overflow: hidden; +} + +.reveal-viewport { + height: 100%; + overflow: hidden; + position: relative; + line-height: 1; + margin: 0; + + background-color: #fff; + color: #000; +} + +// Force the presentation to cover the full viewport when we +// enter fullscreen mode. Fixes sizing issues in Safari. 
+.reveal-viewport:fullscreen { + top: 0 !important; + left: 0 !important; + width: 100% !important; + height: 100% !important; + transform: none !important; +} + + +/********************************************* + * VIEW FRAGMENTS + *********************************************/ + +.reveal .fragment { + transition: all .2s ease; + + &:not(.custom) { + opacity: 0; + visibility: hidden; + will-change: opacity; + } + + &.visible { + opacity: 1; + visibility: inherit; + } + + &.disabled { + transition: none; + } +} + +.reveal .fragment.grow { + opacity: 1; + visibility: inherit; + + &.visible { + transform: scale( 1.3 ); + } +} + +.reveal .fragment.shrink { + opacity: 1; + visibility: inherit; + + &.visible { + transform: scale( 0.7 ); + } +} + +.reveal .fragment.zoom-in { + transform: scale( 0.1 ); + + &.visible { + transform: none; + } +} + +.reveal .fragment.fade-out { + opacity: 1; + visibility: inherit; + + &.visible { + opacity: 0; + visibility: hidden; + } +} + +.reveal .fragment.semi-fade-out { + opacity: 1; + visibility: inherit; + + &.visible { + opacity: 0.5; + visibility: inherit; + } +} + +.reveal .fragment.strike { + opacity: 1; + visibility: inherit; + + &.visible { + text-decoration: line-through; + } +} + +.reveal .fragment.fade-up { + transform: translate(0, 40px); + + &.visible { + transform: translate(0, 0); + } +} + +.reveal .fragment.fade-down { + transform: translate(0, -40px); + + &.visible { + transform: translate(0, 0); + } +} + +.reveal .fragment.fade-right { + transform: translate(-40px, 0); + + &.visible { + transform: translate(0, 0); + } +} + +.reveal .fragment.fade-left { + transform: translate(40px, 0); + + &.visible { + transform: translate(0, 0); + } +} + +.reveal .fragment.fade-in-then-out, +.reveal .fragment.current-visible { + opacity: 0; + visibility: hidden; + + &.current-fragment { + opacity: 1; + visibility: inherit; + } +} + +.reveal .fragment.fade-in-then-semi-out { + opacity: 0; + visibility: hidden; + + &.visible { + 
opacity: 0.5; + visibility: inherit; + } + + &.current-fragment { + opacity: 1; + visibility: inherit; + } +} + +.reveal .fragment.highlight-red, +.reveal .fragment.highlight-current-red, +.reveal .fragment.highlight-green, +.reveal .fragment.highlight-current-green, +.reveal .fragment.highlight-blue, +.reveal .fragment.highlight-current-blue { + opacity: 1; + visibility: inherit; +} + .reveal .fragment.highlight-red.visible { + color: #ff2c2d + } + .reveal .fragment.highlight-green.visible { + color: #17ff2e; + } + .reveal .fragment.highlight-blue.visible { + color: #1b91ff; + } + +.reveal .fragment.highlight-current-red.current-fragment { + color: #ff2c2d +} +.reveal .fragment.highlight-current-green.current-fragment { + color: #17ff2e; +} +.reveal .fragment.highlight-current-blue.current-fragment { + color: #1b91ff; +} + + +/********************************************* + * DEFAULT ELEMENT STYLES + *********************************************/ + +/* Fixes issue in Chrome where italic fonts did not appear when printing to PDF */ +.reveal:after { + content: ''; + font-style: italic; +} + +.reveal iframe { + z-index: 1; +} + +/** Prevents layering issues in certain browser/transition combinations */ +.reveal a { + position: relative; +} + + +/********************************************* + * CONTROLS + *********************************************/ + +@keyframes bounce-right { + 0%, 10%, 25%, 40%, 50% {transform: translateX(0);} + 20% {transform: translateX(10px);} + 30% {transform: translateX(-5px);} +} + +@keyframes bounce-left { + 0%, 10%, 25%, 40%, 50% {transform: translateX(0);} + 20% {transform: translateX(-10px);} + 30% {transform: translateX(5px);} +} + +@keyframes bounce-down { + 0%, 10%, 25%, 40%, 50% {transform: translateY(0);} + 20% {transform: translateY(10px);} + 30% {transform: translateY(-5px);} +} + +$controlArrowSize: 3.6em; +$controlArrowSpacing: 1.4em; +$controlArrowLength: 2.6em; +$controlArrowThickness: 0.5em; +$controlsArrowAngle: 45deg; 
+$controlsArrowAngleHover: 40deg; +$controlsArrowAngleActive: 36deg; + +@mixin controlsArrowTransform( $angle ) { + &:before { + transform: translateX(($controlArrowSize - $controlArrowLength)*0.5) translateY(($controlArrowSize - $controlArrowThickness)*0.5) rotate( $angle ); + } + + &:after { + transform: translateX(($controlArrowSize - $controlArrowLength)*0.5) translateY(($controlArrowSize - $controlArrowThickness)*0.5) rotate( -$angle ); + } +} + +.reveal .controls { + $spacing: 12px; + + display: none; + position: absolute; + top: auto; + bottom: $spacing; + right: $spacing; + left: auto; + z-index: 11; + color: #000; + pointer-events: none; + font-size: 10px; + + button { + position: absolute; + padding: 0; + background-color: transparent; + border: 0; + outline: 0; + cursor: pointer; + color: currentColor; + transform: scale(.9999); + transition: color 0.2s ease, + opacity 0.2s ease, + transform 0.2s ease; + z-index: 2; // above slides + pointer-events: auto; + font-size: inherit; + + visibility: hidden; + opacity: 0; + + -webkit-appearance: none; + -webkit-tap-highlight-color: rgba( 0, 0, 0, 0 ); + } + + .controls-arrow:before, + .controls-arrow:after { + content: ''; + position: absolute; + top: 0; + left: 0; + width: $controlArrowLength; + height: $controlArrowThickness; + border-radius: $controlArrowThickness*0.5; + background-color: currentColor; + + transition: all 0.15s ease, background-color 0.8s ease; + transform-origin: math.div(floor(($controlArrowThickness*0.5)*10), 10) 50%; + will-change: transform; + } + + .controls-arrow { + position: relative; + width: $controlArrowSize; + height: $controlArrowSize; + + @include controlsArrowTransform( $controlsArrowAngle ); + + &:hover { + @include controlsArrowTransform( $controlsArrowAngleHover ); + } + + &:active { + @include controlsArrowTransform( $controlsArrowAngleActive ); + } + } + + .navigate-left { + right: $controlArrowSize + $controlArrowSpacing*2; + bottom: $controlArrowSpacing + 
$controlArrowSize*0.5; + transform: translateX( -10px ); + + &.highlight { + animation: bounce-left 2s 50 both ease-out; + } + } + + .navigate-right { + right: 0; + bottom: $controlArrowSpacing + $controlArrowSize*0.5; + transform: translateX( 10px ); + + .controls-arrow { + transform: rotate( 180deg ); + } + + &.highlight { + animation: bounce-right 2s 50 both ease-out; + } + } + + .navigate-up { + right: $controlArrowSpacing + $controlArrowSize*0.5; + bottom: $controlArrowSpacing*2 + $controlArrowSize; + transform: translateY( -10px ); + + .controls-arrow { + transform: rotate( 90deg ); + } + } + + .navigate-down { + right: $controlArrowSpacing + $controlArrowSize*0.5; + bottom: -$controlArrowSpacing; + padding-bottom: $controlArrowSpacing; + transform: translateY( 10px ); + + .controls-arrow { + transform: rotate( -90deg ); + } + + &.highlight { + animation: bounce-down 2s 50 both ease-out; + } + } + + // Back arrow style: "faded": + // Deemphasize backwards navigation arrows in favor of drawing + // attention to forwards navigation + &[data-controls-back-arrows="faded"] .navigate-up.enabled { + opacity: 0.3; + + &:hover { + opacity: 1; + } + } + + // Back arrow style: "hidden": + // Never show arrows for backwards navigation + &[data-controls-back-arrows="hidden"] .navigate-up.enabled { + opacity: 0; + visibility: hidden; + } + + // Any control button that can be clicked is "enabled" + .enabled { + visibility: visible; + opacity: 0.9; + cursor: pointer; + transform: none; + } + + // Any control button that leads to showing or hiding + // a fragment + .enabled.fragmented { + opacity: 0.5; + } + + .enabled:hover, + .enabled.fragmented:hover { + opacity: 1; + } +} + +.reveal:not(.rtl) .controls { + // Back arrow style: "faded": + // Deemphasize left arrow + &[data-controls-back-arrows="faded"] .navigate-left.enabled { + opacity: 0.3; + + &:hover { + opacity: 1; + } + } + + // Back arrow style: "hidden": + // Never show left arrow + 
&[data-controls-back-arrows="hidden"] .navigate-left.enabled { + opacity: 0; + visibility: hidden; + } +} + +.reveal.rtl .controls { + // Back arrow style: "faded": + // Deemphasize right arrow in RTL mode + &[data-controls-back-arrows="faded"] .navigate-right.enabled { + opacity: 0.3; + + &:hover { + opacity: 1; + } + } + + // Back arrow style: "hidden": + // Never show right arrow in RTL mode + &[data-controls-back-arrows="hidden"] .navigate-right.enabled { + opacity: 0; + visibility: hidden; + } +} + +.reveal[data-navigation-mode="linear"].has-horizontal-slides .navigate-up, +.reveal[data-navigation-mode="linear"].has-horizontal-slides .navigate-down { + display: none; +} + +// Adjust the layout when there are no vertical slides +.reveal[data-navigation-mode="linear"].has-horizontal-slides .navigate-left, +.reveal:not(.has-vertical-slides) .controls .navigate-left { + bottom: $controlArrowSpacing; + right: 0.5em + $controlArrowSpacing + $controlArrowSize; +} + +.reveal[data-navigation-mode="linear"].has-horizontal-slides .navigate-right, +.reveal:not(.has-vertical-slides) .controls .navigate-right { + bottom: $controlArrowSpacing; + right: 0.5em; +} + +// Adjust the layout when there are no horizontal slides +.reveal:not(.has-horizontal-slides) .controls .navigate-up { + right: $controlArrowSpacing; + bottom: $controlArrowSpacing + $controlArrowSize; +} +.reveal:not(.has-horizontal-slides) .controls .navigate-down { + right: $controlArrowSpacing; + bottom: 0.5em; +} + +// Invert arrows based on background color +.reveal.has-dark-background .controls { + color: #fff; +} +.reveal.has-light-background .controls { + color: #000; +} + +// Disable active states on touch devices +.reveal.no-hover .controls .controls-arrow:hover, +.reveal.no-hover .controls .controls-arrow:active { + @include controlsArrowTransform( $controlsArrowAngle ); +} + +// Edge aligned controls layout +@media screen and (min-width: 500px) { + + $spacing: 0.8em; + + .reveal 
.controls[data-controls-layout="edges"] { + & { + top: 0; + right: 0; + bottom: 0; + left: 0; + } + + .navigate-left, + .navigate-right, + .navigate-up, + .navigate-down { + bottom: auto; + right: auto; + } + + .navigate-left { + top: 50%; + left: $spacing; + margin-top: -$controlArrowSize*0.5; + } + + .navigate-right { + top: 50%; + right: $spacing; + margin-top: -$controlArrowSize*0.5; + } + + .navigate-up { + top: $spacing; + left: 50%; + margin-left: -$controlArrowSize*0.5; + } + + .navigate-down { + bottom: $spacing - $controlArrowSpacing + 0.3em; + left: 50%; + margin-left: -$controlArrowSize*0.5; + } + } + +} + + +/********************************************* + * PROGRESS BAR + *********************************************/ + +.reveal .progress { + position: absolute; + display: none; + height: 3px; + width: 100%; + bottom: 0; + left: 0; + z-index: 10; + + background-color: rgba( 0, 0, 0, 0.2 ); + color: #fff; +} + .reveal .progress:after { + content: ''; + display: block; + position: absolute; + height: 10px; + width: 100%; + top: -10px; + } + .reveal .progress span { + display: block; + height: 100%; + width: 100%; + + background-color: currentColor; + transition: transform 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985); + transform-origin: 0 0; + transform: scaleX(0); + } + +/********************************************* + * SLIDE NUMBER + *********************************************/ + +.reveal .slide-number { + position: absolute; + display: block; + right: 8px; + bottom: 8px; + z-index: 31; + font-family: Helvetica, sans-serif; + font-size: 12px; + line-height: 1; + color: #fff; + background-color: rgba( 0, 0, 0, 0.4 ); + padding: 5px; +} + +.reveal .slide-number a { + color: currentColor; +} + +.reveal .slide-number-delimiter { + margin: 0 3px; +} + +/********************************************* + * SLIDES + *********************************************/ + +.reveal { + position: relative; + width: 100%; + height: 100%; + overflow: hidden; + 
touch-action: pinch-zoom; +} + +// Swiping on an embedded deck should not block page scrolling +.reveal.embedded { + touch-action: pan-y; +} + +.reveal .slides { + position: absolute; + width: 100%; + height: 100%; + top: 0; + right: 0; + bottom: 0; + left: 0; + margin: auto; + pointer-events: none; + + overflow: visible; + z-index: 1; + text-align: center; + perspective: 600px; + perspective-origin: 50% 40%; +} + +.reveal .slides>section { + perspective: 600px; +} + +.reveal .slides>section, +.reveal .slides>section>section { + display: none; + position: absolute; + width: 100%; + pointer-events: auto; + + z-index: 10; + transform-style: flat; + transition: transform-origin 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985), + transform 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985), + visibility 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985), + opacity 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985); +} + +/* Global transition speed settings */ +.reveal[data-transition-speed="fast"] .slides section { + transition-duration: 400ms; +} +.reveal[data-transition-speed="slow"] .slides section { + transition-duration: 1200ms; +} + +/* Slide-specific transition speed overrides */ +.reveal .slides section[data-transition-speed="fast"] { + transition-duration: 400ms; +} +.reveal .slides section[data-transition-speed="slow"] { + transition-duration: 1200ms; +} + +.reveal .slides>section.stack { + padding-top: 0; + padding-bottom: 0; + pointer-events: none; + height: 100%; +} + +.reveal .slides>section.present, +.reveal .slides>section>section.present { + display: block; + z-index: 11; + opacity: 1; +} + +.reveal .slides>section:empty, +.reveal .slides>section>section:empty, +.reveal .slides>section[data-background-interactive], +.reveal .slides>section>section[data-background-interactive] { + pointer-events: none; +} + +.reveal.center, +.reveal.center .slides, +.reveal.center .slides section { + min-height: 0 !important; +} + +/* Don't allow interaction with invisible slides */ 
+.reveal .slides>section:not(.present), +.reveal .slides>section>section:not(.present) { + pointer-events: none; +} + +.reveal.overview .slides>section, +.reveal.overview .slides>section>section { + pointer-events: auto; +} + +.reveal .slides>section.past, +.reveal .slides>section.future, +.reveal .slides>section.past>section, +.reveal .slides>section.future>section, +.reveal .slides>section>section.past, +.reveal .slides>section>section.future { + opacity: 0; +} + + +/********************************************* + * Mixins for readability of transitions + *********************************************/ + +@mixin transition-global($style) { + .reveal .slides section[data-transition=#{$style}], + .reveal.#{$style} .slides section:not([data-transition]) { + @content; + } +} +@mixin transition-stack($style) { + .reveal .slides section[data-transition=#{$style}].stack, + .reveal.#{$style} .slides section.stack { + @content; + } +} +@mixin transition-horizontal-past($style) { + .reveal .slides>section[data-transition=#{$style}].past, + .reveal .slides>section[data-transition~=#{$style}-out].past, + .reveal.#{$style} .slides>section:not([data-transition]).past { + @content; + } +} +@mixin transition-horizontal-future($style) { + .reveal .slides>section[data-transition=#{$style}].future, + .reveal .slides>section[data-transition~=#{$style}-in].future, + .reveal.#{$style} .slides>section:not([data-transition]).future { + @content; + } +} + +@mixin transition-vertical-past($style) { + .reveal .slides>section>section[data-transition=#{$style}].past, + .reveal .slides>section>section[data-transition~=#{$style}-out].past, + .reveal.#{$style} .slides>section>section:not([data-transition]).past { + @content; + } +} +@mixin transition-vertical-future($style) { + .reveal .slides>section>section[data-transition=#{$style}].future, + .reveal .slides>section>section[data-transition~=#{$style}-in].future, + .reveal.#{$style} .slides>section>section:not([data-transition]).future { + 
@content; + } +} + +/********************************************* + * SLIDE TRANSITION + * Aliased 'linear' for backwards compatibility + *********************************************/ + +@each $stylename in slide, linear { + @include transition-horizontal-past(#{$stylename}) { + transform: translate(-150%, 0); + } + @include transition-horizontal-future(#{$stylename}) { + transform: translate(150%, 0); + } + @include transition-vertical-past(#{$stylename}) { + transform: translate(0, -150%); + } + @include transition-vertical-future(#{$stylename}) { + transform: translate(0, 150%); + } +} + +/********************************************* + * CONVEX TRANSITION + * Aliased 'default' for backwards compatibility + *********************************************/ + +@each $stylename in default, convex { + @include transition-stack(#{$stylename}) { + transform-style: preserve-3d; + } + + @include transition-horizontal-past(#{$stylename}) { + transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0); + } + @include transition-horizontal-future(#{$stylename}) { + transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0); + } + @include transition-vertical-past(#{$stylename}) { + transform: translate3d(0, -300px, 0) rotateX(70deg) translate3d(0, -300px, 0); + } + @include transition-vertical-future(#{$stylename}) { + transform: translate3d(0, 300px, 0) rotateX(-70deg) translate3d(0, 300px, 0); + } +} + +/********************************************* + * CONCAVE TRANSITION + *********************************************/ + +@include transition-stack(concave) { + transform-style: preserve-3d; +} + +@include transition-horizontal-past(concave) { + transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0); +} +@include transition-horizontal-future(concave) { + transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0); +} +@include transition-vertical-past(concave) { + transform: translate3d(0, -80%, 0) 
rotateX(-70deg) translate3d(0, -80%, 0); +} +@include transition-vertical-future(concave) { + transform: translate3d(0, 80%, 0) rotateX(70deg) translate3d(0, 80%, 0); +} + + +/********************************************* + * ZOOM TRANSITION + *********************************************/ + +@include transition-global(zoom) { + transition-timing-function: ease; +} +@include transition-horizontal-past(zoom) { + visibility: hidden; + transform: scale(16); +} +@include transition-horizontal-future(zoom) { + visibility: hidden; + transform: scale(0.2); +} +@include transition-vertical-past(zoom) { + transform: scale(16); +} +@include transition-vertical-future(zoom) { + transform: scale(0.2); +} + + +/********************************************* + * CUBE TRANSITION + * + * WARNING: + * this is deprecated and will be removed in a + * future version. + *********************************************/ + +.reveal.cube .slides { + perspective: 1300px; +} + +.reveal.cube .slides section { + padding: 30px; + min-height: 700px; + backface-visibility: hidden; + box-sizing: border-box; + transform-style: preserve-3d; +} + .reveal.center.cube .slides section { + min-height: 0; + } + .reveal.cube .slides section:not(.stack):before { + content: ''; + position: absolute; + display: block; + width: 100%; + height: 100%; + left: 0; + top: 0; + background: rgba(0,0,0,0.1); + border-radius: 4px; + transform: translateZ( -20px ); + } + .reveal.cube .slides section:not(.stack):after { + content: ''; + position: absolute; + display: block; + width: 90%; + height: 30px; + left: 5%; + bottom: 0; + background: none; + z-index: 1; + + border-radius: 4px; + box-shadow: 0px 95px 25px rgba(0,0,0,0.2); + transform: translateZ(-90px) rotateX( 65deg ); + } + +.reveal.cube .slides>section.stack { + padding: 0; + background: none; +} + +.reveal.cube .slides>section.past { + transform-origin: 100% 0%; + transform: translate3d(-100%, 0, 0) rotateY(-90deg); +} + +.reveal.cube .slides>section.future { + 
transform-origin: 0% 0%; + transform: translate3d(100%, 0, 0) rotateY(90deg); +} + +.reveal.cube .slides>section>section.past { + transform-origin: 0% 100%; + transform: translate3d(0, -100%, 0) rotateX(90deg); +} + +.reveal.cube .slides>section>section.future { + transform-origin: 0% 0%; + transform: translate3d(0, 100%, 0) rotateX(-90deg); +} + + +/********************************************* + * PAGE TRANSITION + * + * WARNING: + * this is deprecated and will be removed in a + * future version. + *********************************************/ + +.reveal.page .slides { + perspective-origin: 0% 50%; + perspective: 3000px; +} + +.reveal.page .slides section { + padding: 30px; + min-height: 700px; + box-sizing: border-box; + transform-style: preserve-3d; +} + .reveal.page .slides section.past { + z-index: 12; + } + .reveal.page .slides section:not(.stack):before { + content: ''; + position: absolute; + display: block; + width: 100%; + height: 100%; + left: 0; + top: 0; + background: rgba(0,0,0,0.1); + transform: translateZ( -20px ); + } + .reveal.page .slides section:not(.stack):after { + content: ''; + position: absolute; + display: block; + width: 90%; + height: 30px; + left: 5%; + bottom: 0; + background: none; + z-index: 1; + + border-radius: 4px; + box-shadow: 0px 95px 25px rgba(0,0,0,0.2); + + -webkit-transform: translateZ(-90px) rotateX( 65deg ); + } + +.reveal.page .slides>section.stack { + padding: 0; + background: none; +} + +.reveal.page .slides>section.past { + transform-origin: 0% 0%; + transform: translate3d(-40%, 0, 0) rotateY(-80deg); +} + +.reveal.page .slides>section.future { + transform-origin: 100% 0%; + transform: translate3d(0, 0, 0); +} + +.reveal.page .slides>section>section.past { + transform-origin: 0% 0%; + transform: translate3d(0, -40%, 0) rotateX(80deg); +} + +.reveal.page .slides>section>section.future { + transform-origin: 0% 100%; + transform: translate3d(0, 0, 0); +} + + +/********************************************* + * FADE 
TRANSITION + *********************************************/ + +.reveal .slides section[data-transition=fade], +.reveal.fade .slides section:not([data-transition]), +.reveal.fade .slides>section>section:not([data-transition]) { + transform: none; + transition: opacity 0.5s; +} + + +.reveal.fade.overview .slides section, +.reveal.fade.overview .slides>section>section { + transition: none; +} + + +/********************************************* + * NO TRANSITION + *********************************************/ + +@include transition-global(none) { + transform: none; + transition: none; +} + + +/********************************************* + * PAUSED MODE + *********************************************/ + +.reveal .pause-overlay { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: black; + visibility: hidden; + opacity: 0; + z-index: 100; + transition: all 1s ease; +} + +.reveal .pause-overlay .resume-button { + position: absolute; + bottom: 20px; + right: 20px; + color: #ccc; + border-radius: 2px; + padding: 6px 14px; + border: 2px solid #ccc; + font-size: 16px; + background: transparent; + cursor: pointer; + + &:hover { + color: #fff; + border-color: #fff; + } +} + +.reveal.paused .pause-overlay { + visibility: visible; + opacity: 1; +} + + +/********************************************* + * FALLBACK + *********************************************/ + +.reveal .no-transition, +.reveal .no-transition *, +.reveal .slides.disable-slide-transitions section { + transition: none !important; +} + +.reveal .slides.disable-slide-transitions section { + transform: none !important; +} + + +/********************************************* + * PER-SLIDE BACKGROUNDS + *********************************************/ + +.reveal .backgrounds { + position: absolute; + width: 100%; + height: 100%; + top: 0; + left: 0; + perspective: 600px; +} + .reveal .slide-background { + display: none; + position: absolute; + width: 100%; + height: 100%; + opacity: 
0; + visibility: hidden; + overflow: hidden; + + background-color: rgba( 0, 0, 0, 0 ); + + transition: all 800ms cubic-bezier(0.260, 0.860, 0.440, 0.985); + } + + .reveal .slide-background-content { + position: absolute; + width: 100%; + height: 100%; + + background-position: 50% 50%; + background-repeat: no-repeat; + background-size: cover; + } + + .reveal .slide-background.stack { + display: block; + } + + .reveal .slide-background.present { + opacity: 1; + visibility: visible; + z-index: 2; + } + + .print-pdf .reveal .slide-background { + opacity: 1 !important; + visibility: visible !important; + } + +/* Video backgrounds */ +.reveal .slide-background video { + position: absolute; + width: 100%; + height: 100%; + max-width: none; + max-height: none; + top: 0; + left: 0; + object-fit: cover; +} + .reveal .slide-background[data-background-size="contain"] video { + object-fit: contain; + } + +/* Immediate transition style */ +.reveal[data-background-transition=none]>.backgrounds .slide-background:not([data-background-transition]), +.reveal>.backgrounds .slide-background[data-background-transition=none] { + transition: none; +} + +/* Slide */ +.reveal[data-background-transition=slide]>.backgrounds .slide-background:not([data-background-transition]), +.reveal>.backgrounds .slide-background[data-background-transition=slide] { + opacity: 1; +} + .reveal[data-background-transition=slide]>.backgrounds .slide-background.past:not([data-background-transition]), + .reveal>.backgrounds .slide-background.past[data-background-transition=slide] { + transform: translate(-100%, 0); + } + .reveal[data-background-transition=slide]>.backgrounds .slide-background.future:not([data-background-transition]), + .reveal>.backgrounds .slide-background.future[data-background-transition=slide] { + transform: translate(100%, 0); + } + + .reveal[data-background-transition=slide]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]), + .reveal>.backgrounds 
.slide-background>.slide-background.past[data-background-transition=slide] { + transform: translate(0, -100%); + } + .reveal[data-background-transition=slide]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]), + .reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=slide] { + transform: translate(0, 100%); + } + + +/* Convex */ +.reveal[data-background-transition=convex]>.backgrounds .slide-background.past:not([data-background-transition]), +.reveal>.backgrounds .slide-background.past[data-background-transition=convex] { + opacity: 0; + transform: translate3d(-100%, 0, 0) rotateY(-90deg) translate3d(-100%, 0, 0); +} +.reveal[data-background-transition=convex]>.backgrounds .slide-background.future:not([data-background-transition]), +.reveal>.backgrounds .slide-background.future[data-background-transition=convex] { + opacity: 0; + transform: translate3d(100%, 0, 0) rotateY(90deg) translate3d(100%, 0, 0); +} + +.reveal[data-background-transition=convex]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]), +.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=convex] { + opacity: 0; + transform: translate3d(0, -100%, 0) rotateX(90deg) translate3d(0, -100%, 0); +} +.reveal[data-background-transition=convex]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]), +.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=convex] { + opacity: 0; + transform: translate3d(0, 100%, 0) rotateX(-90deg) translate3d(0, 100%, 0); +} + + +/* Concave */ +.reveal[data-background-transition=concave]>.backgrounds .slide-background.past:not([data-background-transition]), +.reveal>.backgrounds .slide-background.past[data-background-transition=concave] { + opacity: 0; + transform: translate3d(-100%, 0, 0) rotateY(90deg) translate3d(-100%, 0, 0); +} 
+.reveal[data-background-transition=concave]>.backgrounds .slide-background.future:not([data-background-transition]), +.reveal>.backgrounds .slide-background.future[data-background-transition=concave] { + opacity: 0; + transform: translate3d(100%, 0, 0) rotateY(-90deg) translate3d(100%, 0, 0); +} + +.reveal[data-background-transition=concave]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]), +.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=concave] { + opacity: 0; + transform: translate3d(0, -100%, 0) rotateX(-90deg) translate3d(0, -100%, 0); +} +.reveal[data-background-transition=concave]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]), +.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=concave] { + opacity: 0; + transform: translate3d(0, 100%, 0) rotateX(90deg) translate3d(0, 100%, 0); +} + +/* Zoom */ +.reveal[data-background-transition=zoom]>.backgrounds .slide-background:not([data-background-transition]), +.reveal>.backgrounds .slide-background[data-background-transition=zoom] { + transition-timing-function: ease; +} + +.reveal[data-background-transition=zoom]>.backgrounds .slide-background.past:not([data-background-transition]), +.reveal>.backgrounds .slide-background.past[data-background-transition=zoom] { + opacity: 0; + visibility: hidden; + transform: scale(16); +} +.reveal[data-background-transition=zoom]>.backgrounds .slide-background.future:not([data-background-transition]), +.reveal>.backgrounds .slide-background.future[data-background-transition=zoom] { + opacity: 0; + visibility: hidden; + transform: scale(0.2); +} + +.reveal[data-background-transition=zoom]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]), +.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=zoom] { + opacity: 0; + visibility: hidden; + transform: 
scale(16); +} +.reveal[data-background-transition=zoom]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]), +.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=zoom] { + opacity: 0; + visibility: hidden; + transform: scale(0.2); +} + + +/* Global transition speed settings */ +.reveal[data-transition-speed="fast"]>.backgrounds .slide-background { + transition-duration: 400ms; +} +.reveal[data-transition-speed="slow"]>.backgrounds .slide-background { + transition-duration: 1200ms; +} + + +/********************************************* + * AUTO ANIMATE + *********************************************/ + +.reveal [data-auto-animate-target^="unmatched"] { + will-change: opacity; +} + +.reveal section[data-auto-animate]:not(.stack):not([data-auto-animate="running"]) [data-auto-animate-target^="unmatched"] { + opacity: 0; +} + + +/********************************************* + * OVERVIEW + *********************************************/ + +.reveal.overview { + perspective-origin: 50% 50%; + perspective: 700px; + + .slides { + // Fixes overview rendering errors in FF48+, not applied to + // other browsers since it degrades performance + -moz-transform-style: preserve-3d; + } + + .slides section { + height: 100%; + top: 0 !important; + opacity: 1 !important; + overflow: hidden; + visibility: visible !important; + cursor: pointer; + box-sizing: border-box; + } + .slides section:hover, + .slides section.present { + outline: 10px solid rgba(150,150,150,0.4); + outline-offset: 10px; + } + .slides section .fragment { + opacity: 1; + transition: none; + } + .slides section:after, + .slides section:before { + display: none !important; + } + .slides>section.stack { + padding: 0; + top: 0 !important; + background: none; + outline: none; + overflow: visible; + } + + .backgrounds { + perspective: inherit; + + // Fixes overview rendering errors in FF48+, not applied to + // other browsers since it degrades 
performance + -moz-transform-style: preserve-3d; + } + + .backgrounds .slide-background { + opacity: 1; + visibility: visible; + + // This can't be applied to the slide itself in Safari + outline: 10px solid rgba(150,150,150,0.1); + outline-offset: 10px; + } + + .backgrounds .slide-background.stack { + overflow: visible; + } +} + +// Disable transitions transitions while we're activating +// or deactivating the overview mode. +.reveal.overview .slides section, +.reveal.overview-deactivating .slides section { + transition: none; +} + +.reveal.overview .backgrounds .slide-background, +.reveal.overview-deactivating .backgrounds .slide-background { + transition: none; +} + + +/********************************************* + * RTL SUPPORT + *********************************************/ + +.reveal.rtl .slides, +.reveal.rtl .slides h1, +.reveal.rtl .slides h2, +.reveal.rtl .slides h3, +.reveal.rtl .slides h4, +.reveal.rtl .slides h5, +.reveal.rtl .slides h6 { + direction: rtl; + font-family: sans-serif; +} + +.reveal.rtl pre, +.reveal.rtl code { + direction: ltr; +} + +.reveal.rtl ol, +.reveal.rtl ul { + text-align: right; +} + +.reveal.rtl .progress span { + transform-origin: 100% 0; +} + +/********************************************* + * PARALLAX BACKGROUND + *********************************************/ + +.reveal.has-parallax-background .backgrounds { + transition: all 0.8s ease; +} + +/* Global transition speed settings */ +.reveal.has-parallax-background[data-transition-speed="fast"] .backgrounds { + transition-duration: 400ms; +} +.reveal.has-parallax-background[data-transition-speed="slow"] .backgrounds { + transition-duration: 1200ms; +} + + +/********************************************* + * OVERLAY FOR LINK PREVIEWS AND HELP + *********************************************/ + +$overlayHeaderHeight: 40px; +$overlayHeaderPadding: 5px; + +.reveal > .overlay { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: 1000; + background: 
rgba( 0, 0, 0, 0.9 ); + transition: all 0.3s ease; +} + + .reveal > .overlay .spinner { + position: absolute; + display: block; + top: 50%; + left: 50%; + width: 32px; + height: 32px; + margin: -16px 0 0 -16px; + z-index: 10; + background-image: url(data:image/gif;base64,R0lGODlhIAAgAPMAAJmZmf%2F%2F%2F6%2Bvr8nJybW1tcDAwOjo6Nvb26ioqKOjo7Ozs%2FLy8vz8%2FAAAAAAAAAAAACH%2FC05FVFNDQVBFMi4wAwEAAAAh%2FhpDcmVhdGVkIHdpdGggYWpheGxvYWQuaW5mbwAh%2BQQJCgAAACwAAAAAIAAgAAAE5xDISWlhperN52JLhSSdRgwVo1ICQZRUsiwHpTJT4iowNS8vyW2icCF6k8HMMBkCEDskxTBDAZwuAkkqIfxIQyhBQBFvAQSDITM5VDW6XNE4KagNh6Bgwe60smQUB3d4Rz1ZBApnFASDd0hihh12BkE9kjAJVlycXIg7CQIFA6SlnJ87paqbSKiKoqusnbMdmDC2tXQlkUhziYtyWTxIfy6BE8WJt5YJvpJivxNaGmLHT0VnOgSYf0dZXS7APdpB309RnHOG5gDqXGLDaC457D1zZ%2FV%2FnmOM82XiHRLYKhKP1oZmADdEAAAh%2BQQJCgAAACwAAAAAIAAgAAAE6hDISWlZpOrNp1lGNRSdRpDUolIGw5RUYhhHukqFu8DsrEyqnWThGvAmhVlteBvojpTDDBUEIFwMFBRAmBkSgOrBFZogCASwBDEY%2FCZSg7GSE0gSCjQBMVG023xWBhklAnoEdhQEfyNqMIcKjhRsjEdnezB%2BA4k8gTwJhFuiW4dokXiloUepBAp5qaKpp6%2BHo7aWW54wl7obvEe0kRuoplCGepwSx2jJvqHEmGt6whJpGpfJCHmOoNHKaHx61WiSR92E4lbFoq%2BB6QDtuetcaBPnW6%2BO7wDHpIiK9SaVK5GgV543tzjgGcghAgAh%2BQQJCgAAACwAAAAAIAAgAAAE7hDISSkxpOrN5zFHNWRdhSiVoVLHspRUMoyUakyEe8PTPCATW9A14E0UvuAKMNAZKYUZCiBMuBakSQKG8G2FzUWox2AUtAQFcBKlVQoLgQReZhQlCIJesQXI5B0CBnUMOxMCenoCfTCEWBsJColTMANldx15BGs8B5wlCZ9Po6OJkwmRpnqkqnuSrayqfKmqpLajoiW5HJq7FL1Gr2mMMcKUMIiJgIemy7xZtJsTmsM4xHiKv5KMCXqfyUCJEonXPN2rAOIAmsfB3uPoAK%2B%2BG%2Bw48edZPK%2BM6hLJpQg484enXIdQFSS1u6UhksENEQAAIfkECQoAAAAsAAAAACAAIAAABOcQyEmpGKLqzWcZRVUQnZYg1aBSh2GUVEIQ2aQOE%2BG%2BcD4ntpWkZQj1JIiZIogDFFyHI0UxQwFugMSOFIPJftfVAEoZLBbcLEFhlQiqGp1Vd140AUklUN3eCA51C1EWMzMCezCBBmkxVIVHBWd3HHl9JQOIJSdSnJ0TDKChCwUJjoWMPaGqDKannasMo6WnM562R5YluZRwur0wpgqZE7NKUm%2BFNRPIhjBJxKZteWuIBMN4zRMIVIhffcgojwCF117i4nlLnY5ztRLsnOk%2BaV%2BoJY7V7m76PdkS4trKcdg0Zc0tTcKkRAAAIfkECQoAAAAsAAAAACAAIAAABO4QyEkpKqjqzScpRaVkXZWQEximw1BSCUEIlDohrft6cpKCk5xid5MNJTaAIkekKGQkWyKHkvhKsR7ARmitkAYDYRIbUQRQjWBwJRzChi9CRlBcY1UN4g0%2FVNB0AlcvcAYHRyZPdEQFYV8c
cwR5HWxEJ02YmRMLnJ1xCYp0Y5idpQuhopmmC2KgojKasUQDk5BNAwwMOh2RtRq5uQuPZKGIJQIGwAwGf6I0JXMpC8C7kXWDBINFMxS4DKMAWVWAGYsAdNqW5uaRxkSKJOZKaU3tPOBZ4DuK2LATgJhkPJMgTwKCdFjyPHEnKxFCDhEAACH5BAkKAAAALAAAAAAgACAAAATzEMhJaVKp6s2nIkolIJ2WkBShpkVRWqqQrhLSEu9MZJKK9y1ZrqYK9WiClmvoUaF8gIQSNeF1Er4MNFn4SRSDARWroAIETg1iVwuHjYB1kYc1mwruwXKC9gmsJXliGxc%2BXiUCby9ydh1sOSdMkpMTBpaXBzsfhoc5l58Gm5yToAaZhaOUqjkDgCWNHAULCwOLaTmzswadEqggQwgHuQsHIoZCHQMMQgQGubVEcxOPFAcMDAYUA85eWARmfSRQCdcMe0zeP1AAygwLlJtPNAAL19DARdPzBOWSm1brJBi45soRAWQAAkrQIykShQ9wVhHCwCQCACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiRMDjI0Fd30%2FiI2UA5GSS5UDj2l6NoqgOgN4gksEBgYFf0FDqKgHnyZ9OX8HrgYHdHpcHQULXAS2qKpENRg7eAMLC7kTBaixUYFkKAzWAAnLC7FLVxLWDBLKCwaKTULgEwbLA4hJtOkSBNqITT3xEgfLpBtzE%2FjiuL04RGEBgwWhShRgQExHBAAh%2BQQJCgAAACwAAAAAIAAgAAAE7xDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfZiCqGk5dTESJeaOAlClzsJsqwiJwiqnFrb2nS9kmIcgEsjQydLiIlHehhpejaIjzh9eomSjZR%2BipslWIRLAgMDOR2DOqKogTB9pCUJBagDBXR6XB0EBkIIsaRsGGMMAxoDBgYHTKJiUYEGDAzHC9EACcUGkIgFzgwZ0QsSBcXHiQvOwgDdEwfFs0sDzt4S6BK4xYjkDOzn0unFeBzOBijIm1Dgmg5YFQwsCMjp1oJ8LyIAACH5BAkKAAAALAAAAAAgACAAAATwEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GGl6NoiPOH16iZKNlH6KmyWFOggHhEEvAwwMA0N9GBsEC6amhnVcEwavDAazGwIDaH1ipaYLBUTCGgQDA8NdHz0FpqgTBwsLqAbWAAnIA4FWKdMLGdYGEgraigbT0OITBcg5QwPT4xLrROZL6AuQAPUS7bxLpoWidY0JtxLHKhwwMJBTHgPKdEQAACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GAULDJCRiXo1CpGXDJOUjY%2BYip9DhToJA4RBLwMLCwVDfRgbBAaqqoZ1XBMHswsHtxtFaH1iqaoGNgAIxRpbFAgfPQSqpbgGBqUD1wBXeCYp1AYZ19JJOYgH1KwA4UBvQwXUBxPqVD9L3sbp2BNk2xvvFPJd%2BMFCN6HAAIKgNggY0KtEBAAh%2BQQJCgAAACwAAAAAIAAgAAAE6BDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavh
O9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfYIDMaAFdTESJeaEDAIMxYFqrOUaNW4E4ObYcCXaiBVEgULe0NJaxxtYksjh2NLkZISgDgJhHthkpU4mW6blRiYmZOlh4JWkDqILwUGBnE6TYEbCgevr0N1gH4At7gHiRpFaLNrrq8HNgAJA70AWxQIH1%2BvsYMDAzZQPC9VCNkDWUhGkuE5PxJNwiUK4UfLzOlD4WvzAHaoG9nxPi5d%2BjYUqfAhhykOFwJWiAAAIfkECQoAAAAsAAAAACAAIAAABPAQyElpUqnqzaciSoVkXVUMFaFSwlpOCcMYlErAavhOMnNLNo8KsZsMZItJEIDIFSkLGQoQTNhIsFehRww2CQLKF0tYGKYSg%2BygsZIuNqJksKgbfgIGepNo2cIUB3V1B3IvNiBYNQaDSTtfhhx0CwVPI0UJe0%2Bbm4g5VgcGoqOcnjmjqDSdnhgEoamcsZuXO1aWQy8KAwOAuTYYGwi7w5h%2BKr0SJ8MFihpNbx%2B4Erq7BYBuzsdiH1jCAzoSfl0rVirNbRXlBBlLX%2BBP0XJLAPGzTkAuAOqb0WT5AH7OcdCm5B8TgRwSRKIHQtaLCwg1RAAAOwAAAAAAAAAAAA%3D%3D); + + visibility: visible; + opacity: 0.6; + transition: all 0.3s ease; + } + + .reveal > .overlay header { + position: absolute; + left: 0; + top: 0; + width: 100%; + padding: $overlayHeaderPadding; + z-index: 2; + box-sizing: border-box; + } + .reveal > .overlay header a { + display: inline-block; + width: $overlayHeaderHeight; + height: $overlayHeaderHeight; + line-height: 36px; + padding: 0 10px; + float: right; + opacity: 0.6; + + box-sizing: border-box; + } + .reveal > .overlay header a:hover { + opacity: 1; + } + .reveal > .overlay header a .icon { + display: inline-block; + width: 20px; + height: 20px; + + background-position: 50% 50%; + background-size: 100%; + background-repeat: no-repeat; + } + .reveal > .overlay header a.close .icon { + background-image: 
url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAABkklEQVRYR8WX4VHDMAxG6wnoJrABZQPYBCaBTWAD2g1gE5gg6OOsXuxIlr40d81dfrSJ9V4c2VLK7spHuTJ/5wpM07QXuXc5X0opX2tEJcadjHuV80li/FgxTIEK/5QBCICBD6xEhSMGHgQPgBgLiYVAB1dpSqKDawxTohFw4JSEA3clzgIBPCURwE2JucBR7rhPJJv5OpJwDX+SfDjgx1wACQeJG1aChP9K/IMmdZ8DtESV1WyP3Bt4MwM6sj4NMxMYiqUWHQu4KYA/SYkIjOsm3BXYWMKFDwU2khjCQ4ELJUJ4SmClRArOCmSXGuKma0fYD5CbzHxFpCSGAhfAVSSUGDUk2BWZaff2g6GE15BsBQ9nwmpIGDiyHQddwNTMKkbZaf9fajXQca1EX44puJZUsnY0ObGmITE3GVLCbEhQUjGVt146j6oasWN+49Vph2w1pZ5EansNZqKBm1txbU57iRRcZ86RWMDdWtBJUHBHwoQPi1GV+JCbntmvok7iTX4/Up9mgyTc/FJYDTcndgH/AA5A/CHsyEkVAAAAAElFTkSuQmCC); + } + .reveal > .overlay header a.external .icon { + background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAcElEQVRYR+2WSQoAIQwEzf8f7XiOMkUQxUPlGkM3hVmiQfQR9GYnH1SsAQlI4DiBqkCMoNb9y2e90IAEJPAcgdznU9+engMaeJ7Azh5Y1U67gAho4DqBqmB1buAf0MB1AlVBek83ZPkmJMGc1wAR+AAqod/B97TRpQAAAABJRU5ErkJggg==); + } + + .reveal > .overlay .viewport { + position: absolute; + display: flex; + top: $overlayHeaderHeight + $overlayHeaderPadding*2; + right: 0; + bottom: 0; + left: 0; + } + + .reveal > .overlay.overlay-preview .viewport iframe { + width: 100%; + height: 100%; + max-width: 100%; + max-height: 100%; + border: 0; + + opacity: 0; + visibility: hidden; + transition: all 0.3s ease; + } + + .reveal > .overlay.overlay-preview.loaded .viewport iframe { + opacity: 1; + visibility: visible; + } + + .reveal > .overlay.overlay-preview.loaded .viewport-inner { + position: absolute; + z-index: -1; + left: 0; + top: 45%; + width: 100%; + text-align: center; + letter-spacing: normal; + } + .reveal > .overlay.overlay-preview .x-frame-error { + opacity: 0; + transition: opacity 0.3s ease 0.3s; + } + .reveal > .overlay.overlay-preview.loaded .x-frame-error { + opacity: 1; + } + + .reveal > .overlay.overlay-preview.loaded .spinner { + opacity: 0; + visibility: hidden; + transform: scale(0.2); + } + + .reveal > .overlay.overlay-help 
.viewport { + overflow: auto; + color: #fff; + } + + .reveal > .overlay.overlay-help .viewport .viewport-inner { + width: 600px; + margin: auto; + padding: 20px 20px 80px 20px; + text-align: center; + letter-spacing: normal; + } + + .reveal > .overlay.overlay-help .viewport .viewport-inner .title { + font-size: 20px; + } + + .reveal > .overlay.overlay-help .viewport .viewport-inner table { + border: 1px solid #fff; + border-collapse: collapse; + font-size: 16px; + } + + .reveal > .overlay.overlay-help .viewport .viewport-inner table th, + .reveal > .overlay.overlay-help .viewport .viewport-inner table td { + width: 200px; + padding: 14px; + border: 1px solid #fff; + vertical-align: middle; + } + + .reveal > .overlay.overlay-help .viewport .viewport-inner table th { + padding-top: 20px; + padding-bottom: 20px; + } + + +/********************************************* + * PLAYBACK COMPONENT + *********************************************/ + +.reveal .playback { + position: absolute; + left: 15px; + bottom: 20px; + z-index: 30; + cursor: pointer; + transition: all 400ms ease; + -webkit-tap-highlight-color: rgba( 0, 0, 0, 0 ); +} + +.reveal.overview .playback { + opacity: 0; + visibility: hidden; +} + + +/********************************************* + * CODE HIGHLGIHTING + *********************************************/ + +.reveal .hljs { + min-height: 100%; +} + +.reveal .hljs table { + margin: initial; +} + +.reveal .hljs-ln-code, +.reveal .hljs-ln-numbers { + padding: 0; + border: 0; +} + +.reveal .hljs-ln-numbers { + opacity: 0.6; + padding-right: 0.75em; + text-align: right; + vertical-align: top; +} + +.reveal .hljs.has-highlights tr:not(.highlight-line) { + opacity: 0.4; +} + +.reveal .hljs:not(:first-child).fragment { + position: absolute; + top: 0; + left: 0; + width: 100%; + box-sizing: border-box; +} + +.reveal pre[data-auto-animate-target] { + overflow: hidden; +} +.reveal pre[data-auto-animate-target] code { + height: 100%; +} + + 
+/********************************************* + * ROLLING LINKS + *********************************************/ + +.reveal .roll { + display: inline-block; + line-height: 1.2; + overflow: hidden; + + vertical-align: top; + perspective: 400px; + perspective-origin: 50% 50%; +} + .reveal .roll:hover { + background: none; + text-shadow: none; + } +.reveal .roll span { + display: block; + position: relative; + padding: 0 2px; + + pointer-events: none; + transition: all 400ms ease; + transform-origin: 50% 0%; + transform-style: preserve-3d; + backface-visibility: hidden; +} + .reveal .roll:hover span { + background: rgba(0,0,0,0.5); + transform: translate3d( 0px, 0px, -45px ) rotateX( 90deg ); + } +.reveal .roll span:after { + content: attr(data-title); + + display: block; + position: absolute; + left: 0; + top: 0; + padding: 0 2px; + backface-visibility: hidden; + transform-origin: 50% 0%; + transform: translate3d( 0px, 110%, 0px ) rotateX( -90deg ); +} + + +/********************************************* + * SPEAKER NOTES + *********************************************/ + +$notesWidthPercent: 25%; + +// Hide on-page notes +.reveal aside.notes { + display: none; +} + +// An interface element that can optionally be used to show the +// speaker notes to all viewers, on top of the presentation +.reveal .speaker-notes { + display: none; + position: absolute; + width: math.div($notesWidthPercent, (1 - math.div($notesWidthPercent,100))) * 1%; + height: 100%; + top: 0; + left: 100%; + padding: 14px 18px 14px 18px; + z-index: 1; + font-size: 18px; + line-height: 1.4; + border: 1px solid rgba( 0, 0, 0, 0.05 ); + color: #222; + background-color: #f5f5f5; + overflow: auto; + box-sizing: border-box; + text-align: left; + font-family: Helvetica, sans-serif; + -webkit-overflow-scrolling: touch; + + .notes-placeholder { + color: #ccc; + font-style: italic; + } + + &:focus { + outline: none; + } + + &:before { + content: 'Speaker notes'; + display: block; + margin-bottom: 10px; + 
opacity: 0.5; + } +} + + +.reveal.show-notes { + max-width: 100% - $notesWidthPercent; + overflow: visible; +} + +.reveal.show-notes .speaker-notes { + display: block; +} + +@media screen and (min-width: 1600px) { + .reveal .speaker-notes { + font-size: 20px; + } +} + +@media screen and (max-width: 1024px) { + .reveal.show-notes { + border-left: 0; + max-width: none; + max-height: 70%; + max-height: 70vh; + overflow: visible; + } + + .reveal.show-notes .speaker-notes { + top: 100%; + left: 0; + width: 100%; + height: 30vh; + border: 0; + } +} + +@media screen and (max-width: 600px) { + .reveal.show-notes { + max-height: 60%; + max-height: 60vh; + } + + .reveal.show-notes .speaker-notes { + top: 100%; + height: 40vh; + } + + .reveal .speaker-notes { + font-size: 14px; + } +} + + +/********************************************* + * JUMP-TO-SLIDE COMPONENT + *********************************************/ + + .reveal .jump-to-slide { + position: absolute; + top: 15px; + left: 15px; + z-index: 30; + font-size: 32px; + -webkit-tap-highlight-color: rgba( 0, 0, 0, 0 ); +} + +.reveal .jump-to-slide-input { + background: transparent; + padding: 8px; + font-size: inherit; + color: currentColor; + border: 0; +} +.reveal .jump-to-slide-input::placeholder { + color: currentColor; + opacity: 0.5; +} + +.reveal.has-dark-background .jump-to-slide-input { + color: #fff; +} +.reveal.has-light-background .jump-to-slide-input { + color: #222; +} + +.reveal .jump-to-slide-input:focus { + outline: none; +} + + +/********************************************* + * ZOOM PLUGIN + *********************************************/ + +.zoomed .reveal *, +.zoomed .reveal *:before, +.zoomed .reveal *:after { + backface-visibility: visible !important; +} + +.zoomed .reveal .progress, +.zoomed .reveal .controls { + opacity: 0; +} + +.zoomed .reveal .roll span { + background: none; +} + +.zoomed .reveal .roll span:after { + visibility: hidden; +} + + +/********************************************* + * 
PRINT STYLES + *********************************************/ + +@import 'print/pdf.scss'; +@import 'print/paper.scss'; + diff --git a/revealjs/css/theme/README.md b/revealjs/css/theme/README.md new file mode 100644 index 0000000..30916c4 --- /dev/null +++ b/revealjs/css/theme/README.md @@ -0,0 +1,21 @@ +## Dependencies + +Themes are written using Sass to keep things modular and reduce the need for repeated selectors across files. Make sure that you have the reveal.js development environment installed before proceeding: https://revealjs.com/installation/#full-setup + +## Creating a Theme + +To create your own theme, start by duplicating a ```.scss``` file in [/css/theme/source](https://github.com/hakimel/reveal.js/blob/master/css/theme/source). It will be automatically compiled from Sass to CSS (see the [gulpfile](https://github.com/hakimel/reveal.js/blob/master/gulpfile.js)) when you run `npm run build -- css-themes`. + +Each theme file does four things in the following order: + +1. **Include [/css/theme/template/mixins.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/mixins.scss)** +Shared utility functions. + +2. **Include [/css/theme/template/settings.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/settings.scss)** +Declares a set of custom variables that the template file (step 4) expects. Can be overridden in step 3. + +3. **Override** +This is where you override the default theme. Either by specifying variables (see [settings.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/settings.scss) for reference) or by adding any selectors and styles you please. + +4. **Include [/css/theme/template/theme.scss](https://github.com/hakimel/reveal.js/blob/master/css/theme/template/theme.scss)** +The template theme file which will generate final CSS output based on the currently defined variables. 
diff --git a/revealjs/css/theme/source/beige.scss b/revealjs/css/theme/source/beige.scss new file mode 100644 index 0000000..1f60178 --- /dev/null +++ b/revealjs/css/theme/source/beige.scss @@ -0,0 +1,41 @@ +/** + * Beige theme for reveal.js. + * + * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + + +// Include theme-specific fonts +@import url(./fonts/league-gothic/league-gothic.css); +@import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic); + + +// Override theme settings (see ../template/settings.scss) +$mainColor: #333; +$headingColor: #333; +$headingTextShadow: none; +$backgroundColor: #f7f3de; +$linkColor: #8b743d; +$linkColorHover: lighten( $linkColor, 20% ); +$selectionBackgroundColor: rgba(79, 64, 28, 0.99); +$heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15); + +// Background generator +@mixin bodyBackground() { + @include radial-gradient( rgba(247,242,211,1), rgba(255,255,255,1) ); +} + +// Change text colors against dark slide backgrounds +@include dark-bg-text-color(#fff); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/black-contrast.scss b/revealjs/css/theme/source/black-contrast.scss new file mode 100644 index 0000000..9e1a2ca --- /dev/null +++ b/revealjs/css/theme/source/black-contrast.scss @@ -0,0 +1,49 @@ +/** + * Black compact & high contrast reveal.js theme, with headers not in capitals. + * + * By Peter Kehl. 
Based on black.(s)css by Hakim El Hattab, http://hakim.se + * + * - Keep the source similar to black.css - for easy comparison. + * - $mainFontSize controls code blocks, too (although under some ratio). + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + +// Include theme-specific fonts +@import url(./fonts/source-sans-pro/source-sans-pro.css); + + +// Override theme settings (see ../template/settings.scss) +$backgroundColor: #000000; + +$mainColor: #fff; +$headingColor: #fff; + +$mainFontSize: 42px; +$mainFont: 'Source Sans Pro', Helvetica, sans-serif; +$headingFont: 'Source Sans Pro', Helvetica, sans-serif; +$headingTextShadow: none; +$headingLetterSpacing: normal; +$headingTextTransform: uppercase; +$headingFontWeight: 600; +$linkColor: #42affa; +$linkColorHover: lighten( $linkColor, 15% ); +$selectionBackgroundColor: lighten( $linkColor, 25% ); + +$heading1Size: 2.5em; +$heading2Size: 1.6em; +$heading3Size: 1.3em; +$heading4Size: 1.0em; + +// Change text colors against light slide backgrounds +@include light-bg-text-color(#000); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/black.scss b/revealjs/css/theme/source/black.scss new file mode 100644 index 0000000..7c655c4 --- /dev/null +++ b/revealjs/css/theme/source/black.scss @@ -0,0 +1,46 @@ +/** + * Black theme for reveal.js. This is the opposite of the 'white' theme. 
+ * + * By Hakim El Hattab, http://hakim.se + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + +// Include theme-specific fonts +@import url(./fonts/source-sans-pro/source-sans-pro.css); + + +// Override theme settings (see ../template/settings.scss) +$backgroundColor: #191919; + +$mainColor: #fff; +$headingColor: #fff; + +$mainFontSize: 42px; +$mainFont: 'Source Sans Pro', Helvetica, sans-serif; +$headingFont: 'Source Sans Pro', Helvetica, sans-serif; +$headingTextShadow: none; +$headingLetterSpacing: normal; +$headingTextTransform: uppercase; +$headingFontWeight: 600; +$linkColor: #42affa; +$linkColorHover: lighten( $linkColor, 15% ); +$selectionBackgroundColor: rgba( $linkColor, 0.75 ); + +$heading1Size: 2.5em; +$heading2Size: 1.6em; +$heading3Size: 1.3em; +$heading4Size: 1.0em; + +// Change text colors against light slide backgrounds +@include light-bg-text-color(#222); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/blood.scss b/revealjs/css/theme/source/blood.scss new file mode 100644 index 0000000..b5a8679 --- /dev/null +++ b/revealjs/css/theme/source/blood.scss @@ -0,0 +1,87 @@ +/** + * Blood theme for reveal.js + * Author: Walther http://github.com/Walther + * + * Designed to be used with highlight.js theme + * "monokai_sublime.css" available from + * https://github.com/isagalaev/highlight.js/ + * + * For other themes, change $codeBackground accordingly. 
+ * + */ + + // Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + +// Include theme-specific fonts + +@import url(https://fonts.googleapis.com/css?family=Ubuntu:300,700,300italic,700italic); + +// Colors used in the theme +$blood: #a23; +$coal: #222; +$codeBackground: #23241f; + +$backgroundColor: $coal; + +// Main text +$mainFont: Ubuntu, 'sans-serif'; +$mainColor: #eee; + +// Headings +$headingFont: Ubuntu, 'sans-serif'; +$headingTextShadow: 2px 2px 2px $coal; + +// h1 shadow, borrowed humbly from +// (c) Default theme by Hakim El Hattab +$heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15); + +// Links +$linkColor: $blood; +$linkColorHover: lighten( $linkColor, 20% ); + +// Text selection +$selectionBackgroundColor: $blood; +$selectionColor: #fff; + +// Change text colors against dark slide backgrounds +@include light-bg-text-color(#222); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- + +// some overrides after theme template import + +.reveal p { + font-weight: 300; + text-shadow: 1px 1px $coal; +} + +section.has-light-background { + p, h1, h2, h3, h4 { + text-shadow: none; + } +} + +.reveal h1, +.reveal h2, +.reveal h3, +.reveal h4, +.reveal h5, +.reveal h6 { + font-weight: 700; +} + +.reveal p code { + background-color: $codeBackground; + display: inline-block; + border-radius: 7px; +} + +.reveal small code { + vertical-align: baseline; +} \ No newline at end of file diff --git a/revealjs/css/theme/source/dracula.scss b/revealjs/css/theme/source/dracula.scss new file mode 100644 index 0000000..67fb59c --- /dev/null +++ b/revealjs/css/theme/source/dracula.scss @@ 
-0,0 +1,132 @@ +/** + * Dracula Dark theme for reveal.js. + * Based on https://draculatheme.com + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + + +// Include theme-specific fonts +$systemFontsSansSerif: -apple-system, + BlinkMacSystemFont, + avenir next, + avenir, + segoe ui, + helvetica neue, + helvetica, + Cantarell, + Ubuntu, + roboto, + noto, + arial, + sans-serif; +$systemFontsMono: Menlo, + Consolas, + Monaco, + Liberation Mono, + Lucida Console, + monospace; + +/** + * Dracula colors by Zeno Rocha + * https://draculatheme.com/contribute + */ +html * { + color-profile: sRGB; + rendering-intent: auto; +} + +$background: #282A36; +$foreground: #F8F8F2; +$selection: #44475A; +$comment: #6272A4; +$red: #FF5555; +$orange: #FFB86C; +$yellow: #F1FA8C; +$green: #50FA7B; +$purple: #BD93F9; +$cyan: #8BE9FD; +$pink: #FF79C6; + + + +// Override theme settings (see ../template/settings.scss) +$mainColor: $foreground; +$headingColor: $purple; +$headingTextShadow: none; +$headingTextTransform: none; +$backgroundColor: $background; +$linkColor: $pink; +$linkColorHover: $cyan; +$selectionBackgroundColor: $selection; +$inlineCodeColor: $green; +$listBulletColor: $cyan; + +$mainFont: $systemFontsSansSerif; +$codeFont: "Fira Code", $systemFontsMono; + +// Change text colors against light slide backgrounds +@include light-bg-text-color($background); + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- + +// Define additional color effects based on Dracula spec +// https://spec.draculatheme.com/ +:root { + --r-bold-color: #{$orange}; + --r-italic-color: #{$yellow}; + --r-inline-code-color: #{$inlineCodeColor}; + --r-list-bullet-color: #{$listBulletColor}; +} + +.reveal strong, .reveal b { + color: var(--r-bold-color); +} + +.reveal em, .reveal i, .reveal blockquote { + color: 
var(--r-italic-color); +} + +.reveal code { + color: var(--r-inline-code-color); +} + +// Dracula colored list bullets and numbers +.reveal ul { + list-style: none; +} + +.reveal ul li::before { + content: "•"; + color: var(--r-list-bullet-color); + display: inline-block; + width: 1em; + margin-left: -1em +} + +.reveal ol { + list-style: none; + counter-reset: li; +} + +.reveal ol li::before { + content: counter(li) "."; + color: var(--r-list-bullet-color); + display: inline-block; + width: 2em; + + margin-left: -2.5em; + margin-right: 0.5em; + text-align: right; +} + +.reveal ol li { + counter-increment: li +} diff --git a/revealjs/css/theme/source/league.scss b/revealjs/css/theme/source/league.scss new file mode 100644 index 0000000..ee01258 --- /dev/null +++ b/revealjs/css/theme/source/league.scss @@ -0,0 +1,36 @@ +/** + * League theme for reveal.js. + * + * This was the default theme pre-3.0.0. + * + * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + + +// Include theme-specific fonts +@import url(./fonts/league-gothic/league-gothic.css); +@import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic); + +// Override theme settings (see ../template/settings.scss) +$headingTextShadow: 0px 0px 6px rgba(0,0,0,0.2); +$heading1TextShadow: 0 1px 0 #ccc, 0 2px 0 #c9c9c9, 0 3px 0 #bbb, 0 4px 0 #b9b9b9, 0 5px 0 #aaa, 0 6px 1px rgba(0,0,0,.1), 0 0 5px rgba(0,0,0,.1), 0 1px 3px rgba(0,0,0,.3), 0 3px 5px rgba(0,0,0,.2), 0 5px 10px rgba(0,0,0,.25), 0 20px 20px rgba(0,0,0,.15); + +// Background generator +@mixin bodyBackground() { + @include radial-gradient( rgba(28,30,32,1), rgba(85,90,95,1) ); +} + +// Change text colors against light slide backgrounds +@include light-bg-text-color(#222); + + +// Theme template ------------------------------ +@import 
"../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/moon.scss b/revealjs/css/theme/source/moon.scss new file mode 100644 index 0000000..ff2074a --- /dev/null +++ b/revealjs/css/theme/source/moon.scss @@ -0,0 +1,58 @@ +/** + * Solarized Dark theme for reveal.js. + * Author: Achim Staebler + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + + +// Include theme-specific fonts +@import url(./fonts/league-gothic/league-gothic.css); +@import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic); + +/** + * Solarized colors by Ethan Schoonover + */ +html * { + color-profile: sRGB; + rendering-intent: auto; +} + +// Solarized colors +$base03: #002b36; +$base02: #073642; +$base01: #586e75; +$base00: #657b83; +$base0: #839496; +$base1: #93a1a1; +$base2: #eee8d5; +$base3: #fdf6e3; +$yellow: #b58900; +$orange: #cb4b16; +$red: #dc322f; +$magenta: #d33682; +$violet: #6c71c4; +$blue: #268bd2; +$cyan: #2aa198; +$green: #859900; + +// Override theme settings (see ../template/settings.scss) +$mainColor: $base1; +$headingColor: $base2; +$headingTextShadow: none; +$backgroundColor: $base03; +$linkColor: $blue; +$linkColorHover: lighten( $linkColor, 20% ); +$selectionBackgroundColor: $magenta; + +// Change text colors against light slide backgrounds +@include light-bg-text-color(#222); + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/night.scss b/revealjs/css/theme/source/night.scss new file mode 100644 index 0000000..98a2062 --- /dev/null +++ b/revealjs/css/theme/source/night.scss @@ -0,0 +1,37 @@ +/** + * Black theme for reveal.js. 
+ * + * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + +// Include theme-specific fonts +@import url(https://fonts.googleapis.com/css?family=Montserrat:700); +@import url(https://fonts.googleapis.com/css?family=Open+Sans:400,700,400italic,700italic); + + +// Override theme settings (see ../template/settings.scss) +$backgroundColor: #111; + +$mainFont: 'Open Sans', sans-serif; +$linkColor: #e7ad52; +$linkColorHover: lighten( $linkColor, 20% ); +$headingFont: 'Montserrat', Impact, sans-serif; +$headingTextShadow: none; +$headingLetterSpacing: -0.03em; +$headingTextTransform: none; +$selectionBackgroundColor: #e7ad52; + +// Change text colors against light slide backgrounds +@include light-bg-text-color(#222); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- \ No newline at end of file diff --git a/revealjs/css/theme/source/serif.scss b/revealjs/css/theme/source/serif.scss new file mode 100644 index 0000000..1c8d778 --- /dev/null +++ b/revealjs/css/theme/source/serif.scss @@ -0,0 +1,38 @@ +/** + * A simple theme for reveal.js presentations, similar + * to the default theme. The accent color is brown. + * + * This theme is Copyright (C) 2012-2013 Owen Versteeg, http://owenversteeg.com - it is MIT licensed. 
+ */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + + +// Override theme settings (see ../template/settings.scss) +$mainFont: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif; +$mainColor: #000; +$headingFont: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif; +$headingColor: #383D3D; +$headingTextShadow: none; +$headingTextTransform: none; +$backgroundColor: #F0F1EB; +$linkColor: #51483D; +$linkColorHover: lighten( $linkColor, 20% ); +$selectionBackgroundColor: #26351C; + +.reveal a { + line-height: 1.3em; +} + +// Change text colors against dark slide backgrounds +@include dark-bg-text-color(#fff); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/simple.scss b/revealjs/css/theme/source/simple.scss new file mode 100644 index 0000000..faf245f --- /dev/null +++ b/revealjs/css/theme/source/simple.scss @@ -0,0 +1,40 @@ +/** + * A simple theme for reveal.js presentations, similar + * to the default theme. The accent color is darkblue. + * + * This theme is Copyright (C) 2012 Owen Versteeg, https://github.com/StereotypicalApps. It is MIT licensed. 
+ * reveal.js is Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + + +// Include theme-specific fonts +@import url(https://fonts.googleapis.com/css?family=News+Cycle:400,700); +@import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic); + + +// Override theme settings (see ../template/settings.scss) +$mainFont: 'Lato', sans-serif; +$mainColor: #000; +$headingFont: 'News Cycle', Impact, sans-serif; +$headingColor: #000; +$headingTextShadow: none; +$headingTextTransform: none; +$backgroundColor: #fff; +$linkColor: #00008B; +$linkColorHover: lighten( $linkColor, 20% ); +$selectionBackgroundColor: rgba(0, 0, 0, 0.99); + +// Change text colors against dark slide backgrounds +@include dark-bg-text-color(#fff); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- \ No newline at end of file diff --git a/revealjs/css/theme/source/sky.scss b/revealjs/css/theme/source/sky.scss new file mode 100644 index 0000000..c83b9c0 --- /dev/null +++ b/revealjs/css/theme/source/sky.scss @@ -0,0 +1,49 @@ +/** + * Sky theme for reveal.js. 
+ * + * Copyright (C) 2011-2012 Hakim El Hattab, http://hakim.se + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + + +// Include theme-specific fonts +@import url(https://fonts.googleapis.com/css?family=Quicksand:400,700,400italic,700italic); +@import url(https://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700); + + +// Override theme settings (see ../template/settings.scss) +$mainFont: 'Open Sans', sans-serif; +$mainColor: #333; +$headingFont: 'Quicksand', sans-serif; +$headingColor: #333; +$headingLetterSpacing: -0.08em; +$headingTextShadow: none; +$backgroundColor: #f7fbfc; +$linkColor: #3b759e; +$linkColorHover: lighten( $linkColor, 20% ); +$selectionBackgroundColor: #134674; + +// Fix links so they are not cut off +.reveal a { + line-height: 1.3em; +} + +// Background generator +@mixin bodyBackground() { + @include radial-gradient( #add9e4, #f7fbfc ); +} + +// Change text colors against dark slide backgrounds +@include dark-bg-text-color(#fff); + + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/solarized.scss b/revealjs/css/theme/source/solarized.scss new file mode 100644 index 0000000..8bdf1eb --- /dev/null +++ b/revealjs/css/theme/source/solarized.scss @@ -0,0 +1,63 @@ +/** + * Solarized Light theme for reveal.js. 
+ * Author: Achim Staebler + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + + +// Include theme-specific fonts +@import url(./fonts/league-gothic/league-gothic.css); +@import url(https://fonts.googleapis.com/css?family=Lato:400,700,400italic,700italic); + + +/** + * Solarized colors by Ethan Schoonover + */ +html * { + color-profile: sRGB; + rendering-intent: auto; +} + +// Solarized colors +$base03: #002b36; +$base02: #073642; +$base01: #586e75; +$base00: #657b83; +$base0: #839496; +$base1: #93a1a1; +$base2: #eee8d5; +$base3: #fdf6e3; +$yellow: #b58900; +$orange: #cb4b16; +$red: #dc322f; +$magenta: #d33682; +$violet: #6c71c4; +$blue: #268bd2; +$cyan: #2aa198; +$green: #859900; + +// Override theme settings (see ../template/settings.scss) +$mainColor: $base00; +$headingColor: $base01; +$headingTextShadow: none; +$backgroundColor: $base3; +$linkColor: $blue; +$linkColorHover: lighten( $linkColor, 20% ); +$selectionBackgroundColor: $magenta; + +// Background generator +// @mixin bodyBackground() { +// @include radial-gradient( rgba($base3,1), rgba(lighten($base3, 20%),1) ); +// } + + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/white-contrast.scss b/revealjs/css/theme/source/white-contrast.scss new file mode 100644 index 0000000..2a23ba4 --- /dev/null +++ b/revealjs/css/theme/source/white-contrast.scss @@ -0,0 +1,49 @@ +/** + * White compact & high contrast reveal.js theme, with headers not in capitals. + * + * By Peter Kehl. Based on white.(s)css by Hakim El Hattab, http://hakim.se + * + * - Keep the source similar to black.css - for easy comparison. + * - $mainFontSize controls code blocks, too (although under some ratio). 
+ */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + +// Include theme-specific fonts +@import url(./fonts/source-sans-pro/source-sans-pro.css); + + +// Override theme settings (see ../template/settings.scss) +$backgroundColor: #fff; + +$mainColor: #000; +$headingColor: #000; + +$mainFontSize: 42px; +$mainFont: 'Source Sans Pro', Helvetica, sans-serif; +$headingFont: 'Source Sans Pro', Helvetica, sans-serif; +$headingTextShadow: none; +$headingLetterSpacing: normal; +$headingTextTransform: uppercase; +$headingFontWeight: 600; +$linkColor: #2a76dd; +$linkColorHover: lighten( $linkColor, 15% ); +$selectionBackgroundColor: lighten( $linkColor, 25% ); + +$heading1Size: 2.5em; +$heading2Size: 1.6em; +$heading3Size: 1.3em; +$heading4Size: 1.0em; + +// Change text colors against dark slide backgrounds +@include dark-bg-text-color(#fff); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/source/white.scss b/revealjs/css/theme/source/white.scss new file mode 100644 index 0000000..443d30a --- /dev/null +++ b/revealjs/css/theme/source/white.scss @@ -0,0 +1,46 @@ +/** + * White theme for reveal.js. This is the opposite of the 'black' theme. 
+ * + * By Hakim El Hattab, http://hakim.se + */ + + +// Default mixins and settings ----------------- +@import "../template/mixins"; +@import "../template/settings"; +// --------------------------------------------- + + +// Include theme-specific fonts +@import url(./fonts/source-sans-pro/source-sans-pro.css); + + +// Override theme settings (see ../template/settings.scss) +$backgroundColor: #fff; + +$mainColor: #222; +$headingColor: #222; + +$mainFontSize: 42px; +$mainFont: 'Source Sans Pro', Helvetica, sans-serif; +$headingFont: 'Source Sans Pro', Helvetica, sans-serif; +$headingTextShadow: none; +$headingLetterSpacing: normal; +$headingTextTransform: uppercase; +$headingFontWeight: 600; +$linkColor: #2a76dd; +$linkColorHover: lighten( $linkColor, 15% ); +$selectionBackgroundColor: lighten( $linkColor, 25% ); + +$heading1Size: 2.5em; +$heading2Size: 1.6em; +$heading3Size: 1.3em; +$heading4Size: 1.0em; + +// Change text colors against dark slide backgrounds +@include dark-bg-text-color(#fff); + + +// Theme template ------------------------------ +@import "../template/theme"; +// --------------------------------------------- diff --git a/revealjs/css/theme/template/exposer.scss b/revealjs/css/theme/template/exposer.scss new file mode 100644 index 0000000..4aec3e8 --- /dev/null +++ b/revealjs/css/theme/template/exposer.scss @@ -0,0 +1,28 @@ +// Exposes theme's variables for easy re-use in CSS for plugin authors + +:root { + --r-background-color: #{$backgroundColor}; + --r-main-font: #{$mainFont}; + --r-main-font-size: #{$mainFontSize}; + --r-main-color: #{$mainColor}; + --r-block-margin: #{$blockMargin}; + --r-heading-margin: #{$headingMargin}; + --r-heading-font: #{$headingFont}; + --r-heading-color: #{$headingColor}; + --r-heading-line-height: #{$headingLineHeight}; + --r-heading-letter-spacing: #{$headingLetterSpacing}; + --r-heading-text-transform: #{$headingTextTransform}; + --r-heading-text-shadow: #{$headingTextShadow}; + --r-heading-font-weight: 
#{$headingFontWeight}; + --r-heading1-text-shadow: #{$heading1TextShadow}; + --r-heading1-size: #{$heading1Size}; + --r-heading2-size: #{$heading2Size}; + --r-heading3-size: #{$heading3Size}; + --r-heading4-size: #{$heading4Size}; + --r-code-font: #{$codeFont}; + --r-link-color: #{$linkColor}; + --r-link-color-dark: #{darken($linkColor , 15% )}; + --r-link-color-hover: #{$linkColorHover}; + --r-selection-background-color: #{$selectionBackgroundColor}; + --r-selection-color: #{$selectionColor}; +} diff --git a/revealjs/css/theme/template/mixins.scss b/revealjs/css/theme/template/mixins.scss new file mode 100644 index 0000000..17a3db5 --- /dev/null +++ b/revealjs/css/theme/template/mixins.scss @@ -0,0 +1,45 @@ +@mixin vertical-gradient( $top, $bottom ) { + background: $top; + background: -moz-linear-gradient( top, $top 0%, $bottom 100% ); + background: -webkit-gradient( linear, left top, left bottom, color-stop(0%,$top), color-stop(100%,$bottom) ); + background: -webkit-linear-gradient( top, $top 0%, $bottom 100% ); + background: -o-linear-gradient( top, $top 0%, $bottom 100% ); + background: -ms-linear-gradient( top, $top 0%, $bottom 100% ); + background: linear-gradient( top, $top 0%, $bottom 100% ); +} + +@mixin horizontal-gradient( $top, $bottom ) { + background: $top; + background: -moz-linear-gradient( left, $top 0%, $bottom 100% ); + background: -webkit-gradient( linear, left top, right top, color-stop(0%,$top), color-stop(100%,$bottom) ); + background: -webkit-linear-gradient( left, $top 0%, $bottom 100% ); + background: -o-linear-gradient( left, $top 0%, $bottom 100% ); + background: -ms-linear-gradient( left, $top 0%, $bottom 100% ); + background: linear-gradient( left, $top 0%, $bottom 100% ); +} + +@mixin radial-gradient( $outer, $inner, $type: circle ) { + background: $outer; + background: -moz-radial-gradient( center, $type cover, $inner 0%, $outer 100% ); + background: -webkit-gradient( radial, center center, 0px, center center, 100%, 
color-stop(0%,$inner), color-stop(100%,$outer) ); + background: -webkit-radial-gradient( center, $type cover, $inner 0%, $outer 100% ); + background: -o-radial-gradient( center, $type cover, $inner 0%, $outer 100% ); + background: -ms-radial-gradient( center, $type cover, $inner 0%, $outer 100% ); + background: radial-gradient( center, $type cover, $inner 0%, $outer 100% ); +} + +@mixin light-bg-text-color( $color ) { + section.has-light-background { + &, h1, h2, h3, h4, h5, h6 { + color: $color; + } + } +} + +@mixin dark-bg-text-color( $color ) { + section.has-dark-background { + &, h1, h2, h3, h4, h5, h6 { + color: $color; + } + } +} \ No newline at end of file diff --git a/revealjs/css/theme/template/settings.scss b/revealjs/css/theme/template/settings.scss new file mode 100644 index 0000000..5a917f8 --- /dev/null +++ b/revealjs/css/theme/template/settings.scss @@ -0,0 +1,45 @@ +// Base settings for all themes that can optionally be +// overridden by the super-theme + +// Background of the presentation +$backgroundColor: #2b2b2b; + +// Primary/body text +$mainFont: 'Lato', sans-serif; +$mainFontSize: 40px; +$mainColor: #eee; + +// Vertical spacing between blocks of text +$blockMargin: 20px; + +// Headings +$headingMargin: 0 0 $blockMargin 0; +$headingFont: 'League Gothic', Impact, sans-serif; +$headingColor: #eee; +$headingLineHeight: 1.2; +$headingLetterSpacing: normal; +$headingTextTransform: uppercase; +$headingTextShadow: none; +$headingFontWeight: normal; +$heading1TextShadow: $headingTextShadow; + +$heading1Size: 3.77em; +$heading2Size: 2.11em; +$heading3Size: 1.55em; +$heading4Size: 1.00em; + +$codeFont: monospace; + +// Links and actions +$linkColor: #13DAEC; +$linkColorHover: lighten( $linkColor, 20% ); + +// Text selection +$selectionBackgroundColor: #FF5E99; +$selectionColor: #fff; + +// Generates the presentation background, can be overridden +// to return a background image or gradient +@mixin bodyBackground() { + background: $backgroundColor; +} 
diff --git a/revealjs/css/theme/template/theme.scss b/revealjs/css/theme/template/theme.scss new file mode 100644 index 0000000..bc377d3 --- /dev/null +++ b/revealjs/css/theme/template/theme.scss @@ -0,0 +1,331 @@ +// Base theme template for reveal.js + +/********************************************* + * GLOBAL STYLES + *********************************************/ + +@import "./exposer"; + +.reveal-viewport { + @include bodyBackground(); + background-color: var(--r-background-color); +} + +.reveal { + font-family: var(--r-main-font); + font-size: var(--r-main-font-size); + font-weight: normal; + color: var(--r-main-color); +} + +.reveal ::selection { + color: var(--r-selection-color); + background: var(--r-selection-background-color); + text-shadow: none; +} + +.reveal ::-moz-selection { + color: var(--r-selection-color); + background: var(--r-selection-background-color); + text-shadow: none; +} + +.reveal .slides section, +.reveal .slides section>section { + line-height: 1.3; + font-weight: inherit; +} + +/********************************************* + * HEADERS + *********************************************/ + +.reveal h1, +.reveal h2, +.reveal h3, +.reveal h4, +.reveal h5, +.reveal h6 { + margin: var(--r-heading-margin); + color: var(--r-heading-color); + + font-family: var(--r-heading-font); + font-weight: var(--r-heading-font-weight); + line-height: var(--r-heading-line-height); + letter-spacing: var(--r-heading-letter-spacing); + + text-transform: var(--r-heading-text-transform); + text-shadow: var(--r-heading-text-shadow); + + word-wrap: break-word; +} + +.reveal h1 {font-size: var(--r-heading1-size); } +.reveal h2 {font-size: var(--r-heading2-size); } +.reveal h3 {font-size: var(--r-heading3-size); } +.reveal h4 {font-size: var(--r-heading4-size); } + +.reveal h1 { + text-shadow: var(--r-heading1-text-shadow); +} + + +/********************************************* + * OTHER + *********************************************/ + +.reveal p { + margin: 
var(--r-block-margin) 0; + line-height: 1.3; +} + +/* Remove trailing margins after titles */ +.reveal h1:last-child, +.reveal h2:last-child, +.reveal h3:last-child, +.reveal h4:last-child, +.reveal h5:last-child, +.reveal h6:last-child { + margin-bottom: 0; +} + +/* Ensure certain elements are never larger than the slide itself */ +.reveal img, +.reveal video, +.reveal iframe { + max-width: 95%; + max-height: 95%; +} +.reveal strong, +.reveal b { + font-weight: bold; +} + +.reveal em { + font-style: italic; +} + +.reveal ol, +.reveal dl, +.reveal ul { + display: inline-block; + + text-align: left; + margin: 0 0 0 1em; +} + +.reveal ol { + list-style-type: decimal; +} + +.reveal ul { + list-style-type: disc; +} + +.reveal ul ul { + list-style-type: square; +} + +.reveal ul ul ul { + list-style-type: circle; +} + +.reveal ul ul, +.reveal ul ol, +.reveal ol ol, +.reveal ol ul { + display: block; + margin-left: 40px; +} + +.reveal dt { + font-weight: bold; +} + +.reveal dd { + margin-left: 40px; +} + +.reveal blockquote { + display: block; + position: relative; + width: 70%; + margin: var(--r-block-margin) auto; + padding: 5px; + + font-style: italic; + background: rgba(255, 255, 255, 0.05); + box-shadow: 0px 0px 2px rgba(0,0,0,0.2); +} + .reveal blockquote p:first-child, + .reveal blockquote p:last-child { + display: inline-block; + } + +.reveal q { + font-style: italic; +} + +.reveal pre { + display: block; + position: relative; + width: 90%; + margin: var(--r-block-margin) auto; + + text-align: left; + font-size: 0.55em; + font-family: var(--r-code-font); + line-height: 1.2em; + + word-wrap: break-word; + + box-shadow: 0px 5px 15px rgba(0, 0, 0, 0.15); +} + +.reveal code { + font-family: var(--r-code-font); + text-transform: none; + tab-size: 2; +} + +.reveal pre code { + display: block; + padding: 5px; + overflow: auto; + max-height: 400px; + word-wrap: normal; +} + +.reveal .code-wrapper { + white-space: normal; +} + +.reveal .code-wrapper code { + white-space: 
pre; +} + +.reveal table { + margin: auto; + border-collapse: collapse; + border-spacing: 0; +} + +.reveal table th { + font-weight: bold; +} + +.reveal table th, +.reveal table td { + text-align: left; + padding: 0.2em 0.5em 0.2em 0.5em; + border-bottom: 1px solid; +} + +.reveal table th[align="center"], +.reveal table td[align="center"] { + text-align: center; +} + +.reveal table th[align="right"], +.reveal table td[align="right"] { + text-align: right; +} + +.reveal table tbody tr:last-child th, +.reveal table tbody tr:last-child td { + border-bottom: none; +} + +.reveal sup { + vertical-align: super; + font-size: smaller; +} +.reveal sub { + vertical-align: sub; + font-size: smaller; +} + +.reveal small { + display: inline-block; + font-size: 0.6em; + line-height: 1.2em; + vertical-align: top; +} + +.reveal small * { + vertical-align: top; +} + +.reveal img { + margin: var(--r-block-margin) 0; +} + + +/********************************************* + * LINKS + *********************************************/ + +.reveal a { + color: var(--r-link-color); + text-decoration: none; + transition: color .15s ease; +} + .reveal a:hover { + color: var(--r-link-color-hover); + text-shadow: none; + border: none; + } + +.reveal .roll span:after { + color: #fff; + // background: darken( var(--r-link-color), 15% ); + background: var(--r-link-color-dark); + +} + + +/********************************************* + * Frame helper + *********************************************/ + +.reveal .r-frame { + border: 4px solid var(--r-main-color); + box-shadow: 0 0 10px rgba(0, 0, 0, 0.15); +} + +.reveal a .r-frame { + transition: all .15s linear; +} + +.reveal a:hover .r-frame { + border-color: var(--r-link-color); + box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); +} + + +/********************************************* + * NAVIGATION CONTROLS + *********************************************/ + +.reveal .controls { + color: var(--r-link-color); +} + + 
+/********************************************* + * PROGRESS BAR + *********************************************/ + +.reveal .progress { + background: rgba(0,0,0,0.2); + color: var(--r-link-color); +} + +/********************************************* + * PRINT BACKGROUND + *********************************************/ + @media print { + .backgrounds { + background-color: var(--r-background-color); + } +} diff --git a/revealjs/demo.html b/revealjs/demo.html new file mode 100644 index 0000000..d4787ea --- /dev/null +++ b/revealjs/demo.html @@ -0,0 +1,481 @@ + + + + + + + reveal.js – The HTML Presentation Framework + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

The HTML Presentation Framework

+

+ Created by Hakim El Hattab and contributors +

+
+ +
+

Hello There

+

+ reveal.js enables you to create beautiful interactive slide decks using HTML. This presentation will show you examples of what it can do. +

+
+ + +
+
+

Vertical Slides

+

Slides can be nested inside of each other.

+

Use the Space key to navigate through all slides.

+
+ + Down arrow + +
+
+

Basement Level 1

+

Nested slides are useful for adding additional detail underneath a high level horizontal slide.

+
+
+

Basement Level 2

+

That's it, time to go back up.

+
+ + Up arrow + +
+
+ +
+

Slides

+

+ Not a coder? Not a problem. There's a fully-featured visual editor for authoring these, try it out at https://slides.com. +

+
+ +
+

Hidden Slides

+

+ This slide is visible in the source, but hidden when the presentation is viewed. You can show all hidden slides by setting the `showHiddenSlides` config option to `true`. +

+
+ +
+

Pretty Code

+

+						import React, { useState } from 'react';
+
+						function Example() {
+						  const [count, setCount] = useState(0);
+
+						  return (
+						    ...
+						  );
+						}
+					
+

Code syntax highlighting courtesy of highlight.js.

+
+ +
+

With Animations

+
+
+ +
+

Point of View

+

+ Press ESC to enter the slide overview. +

+

+ Hold down the alt key (ctrl in Linux) and click on any element to zoom towards it using zoom.js. Click again to zoom back out. +

+

+ (NOTE: Use ctrl + click in Linux.) +

+
+ +
+

Auto-Animate

+

Automatically animate matching elements across slides with Auto-Animate.

+
+
+
+
+
+
+
+
+
+
+
+
+

Auto-Animate

+
+
+
+
+
+
+
+

Auto-Animate

+
+ +
+

Touch Optimized

+

+ Presentations look great on touch devices, like mobile phones and tablets. Simply swipe through your slides. +

+
+ +
+ +
+ +
+

Add the r-fit-text class to auto-size text

+

FIT TEXT

+
+ +
+
+

Fragments

+

Hit the next arrow...

+

... to step through ...

+

... a fragmented slide.

+ + +
+
+

Fragment Styles

+

There's different types of fragments, like:

+

grow

+

shrink

+

fade-out

+

+ fade-right, + up, + down, + left +

+

fade-in-then-out

+

fade-in-then-semi-out

+

Highlight red blue green

+
+
+ +
+

Transition Styles

+

+ You can select from different transitions, like:
+ None - + Fade - + Slide - + Convex - + Concave - + Zoom +

+
+ +
+

Themes

+

+ reveal.js comes with a few themes built in:
+ + Black (default) - + White - + League - + Sky - + Beige - + Simple
+ Serif - + Blood - + Night - + Moon - + Solarized +

+
+ +
+
+

Slide Backgrounds

+

+ Set data-background="#dddddd" on a slide to change the background color. All CSS color formats are supported. +

+ + Down arrow + +
+
+

Gradient Backgrounds

+
<section data-background-gradient=
+							"linear-gradient(to bottom, #ddd, #191919)">
+
+
+

Image Backgrounds

+
<section data-background="image.png">
+
+
+

Tiled Backgrounds

+
<section data-background="image.png" data-background-repeat="repeat" data-background-size="100px">
+
+
+
+

Video Backgrounds

+
<section data-background-video="video.mp4,video.webm">
+
+
+
+

... and GIFs!

+
+
+ +
+

Background Transitions

+

+ Different background transitions are available via the backgroundTransition option. This one's called "zoom". +

+
Reveal.configure({ backgroundTransition: 'zoom' })
+
+ +
+

Background Transitions

+

+ You can override background transitions per-slide. +

+
<section data-background-transition="zoom">
+
+ +
+
+

Iframe Backgrounds

+

Since reveal.js runs on the web, you can easily embed other web content. Try interacting with the page in the background.

+
+
+ +
+

Marvelous List

+
    +
  • No order here
  • +
  • Or here
  • +
  • Or here
  • +
  • Or here
  • +
+
+ +
+

Fantastic Ordered List

+
    +
  1. One is smaller than...
  2. +
  3. Two is smaller than...
  4. +
  5. Three!
  6. +
+
+ +
+

Tabular Tables

+ + + + + + + + + + + + + + + + + + + + + + + + + +
ItemValueQuantity
Apples$17
Lemonade$218
Bread$32
+
+ +
+

Clever Quotes

+

+ These guys come in two forms, inline: The nice thing about standards is that there are so many to choose from and block: +

+
+ “For years there has been a theory that millions of monkeys typing at random on millions of typewriters would + reproduce the entire works of Shakespeare. The Internet has proven this theory to be untrue.” +
+
+ +
+

Intergalactic Interconnections

+

+ You can link between slides internally, + like this. +

+
+ +
+

Speaker View

+

There's a speaker view. It includes a timer, preview of the upcoming slide as well as your speaker notes.

+

Press the S key to try it out.

+ + +
+ +
+

Export to PDF

+

Presentations can be exported to PDF, here's an example:

+ +
+ +
+

Global State

+

+ Set data-state="something" on a slide and "something" + will be added as a class to the document element when the slide is open. This lets you + apply broader style changes, like switching the page background. +

+
+ +
+

State Events

+

+ Additionally custom events can be triggered on a per slide basis by binding to the data-state name. +

+

+Reveal.on( 'customevent', function() {
+	console.log( '"customevent" has fired' );
+} );
+					
+
+ +
+

Take a Moment

+

+ Press B or . on your keyboard to pause the presentation. This is helpful when you're on stage and want to take distracting slides off the screen. +

+
+ +
+

Much more

+ +
+ +
+

THE END

+

+ - Try the online editor
+ - Source code & documentation +

+
+ +
+ +
+ + + + + + + + + + + diff --git a/revealjs/dist/reset.css b/revealjs/dist/reset.css new file mode 100644 index 0000000..e238539 --- /dev/null +++ b/revealjs/dist/reset.css @@ -0,0 +1,30 @@ +/* http://meyerweb.com/eric/tools/css/reset/ + v4.0 | 20180602 + License: none (public domain) +*/ + +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +b, u, i, center, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td, +article, aside, canvas, details, embed, +figure, figcaption, footer, header, hgroup, +main, menu, nav, output, ruby, section, summary, +time, mark, audio, video { + margin: 0; + padding: 0; + border: 0; + font-size: 100%; + font: inherit; + vertical-align: baseline; +} +/* HTML5 display-role reset for older browsers */ +article, aside, details, figcaption, figure, +footer, header, hgroup, main, menu, nav, section { + display: block; +} \ No newline at end of file diff --git a/revealjs/dist/reveal.css b/revealjs/dist/reveal.css new file mode 100644 index 0000000..b722f5e --- /dev/null +++ b/revealjs/dist/reveal.css @@ -0,0 +1,8 @@ +/*! 
+* reveal.js 4.5.0 +* https://revealjs.com +* MIT licensed +* +* Copyright (C) 2011-2023 Hakim El Hattab, https://hakim.se +*/ +.reveal .r-stretch,.reveal .stretch{max-width:none;max-height:none}.reveal pre.r-stretch code,.reveal pre.stretch code{height:100%;max-height:100%;box-sizing:border-box}.reveal .r-fit-text{display:inline-block;white-space:nowrap}.reveal .r-stack{display:grid}.reveal .r-stack>*{grid-area:1/1;margin:auto}.reveal .r-hstack,.reveal .r-vstack{display:flex}.reveal .r-hstack img,.reveal .r-hstack video,.reveal .r-vstack img,.reveal .r-vstack video{min-width:0;min-height:0;object-fit:contain}.reveal .r-vstack{flex-direction:column;align-items:center;justify-content:center}.reveal .r-hstack{flex-direction:row;align-items:center;justify-content:center}.reveal .items-stretch{align-items:stretch}.reveal .items-start{align-items:flex-start}.reveal .items-center{align-items:center}.reveal .items-end{align-items:flex-end}.reveal .justify-between{justify-content:space-between}.reveal .justify-around{justify-content:space-around}.reveal .justify-start{justify-content:flex-start}.reveal .justify-center{justify-content:center}.reveal .justify-end{justify-content:flex-end}html.reveal-full-page{width:100%;height:100%;height:100vh;height:calc(var(--vh,1vh) * 100);overflow:hidden}.reveal-viewport{height:100%;overflow:hidden;position:relative;line-height:1;margin:0;background-color:#fff;color:#000}.reveal-viewport:fullscreen{top:0!important;left:0!important;width:100%!important;height:100%!important;transform:none!important}.reveal .fragment{transition:all .2s ease}.reveal .fragment:not(.custom){opacity:0;visibility:hidden;will-change:opacity}.reveal .fragment.visible{opacity:1;visibility:inherit}.reveal .fragment.disabled{transition:none}.reveal .fragment.grow{opacity:1;visibility:inherit}.reveal .fragment.grow.visible{transform:scale(1.3)}.reveal .fragment.shrink{opacity:1;visibility:inherit}.reveal .fragment.shrink.visible{transform:scale(.7)}.reveal 
.fragment.zoom-in{transform:scale(.1)}.reveal .fragment.zoom-in.visible{transform:none}.reveal .fragment.fade-out{opacity:1;visibility:inherit}.reveal .fragment.fade-out.visible{opacity:0;visibility:hidden}.reveal .fragment.semi-fade-out{opacity:1;visibility:inherit}.reveal .fragment.semi-fade-out.visible{opacity:.5;visibility:inherit}.reveal .fragment.strike{opacity:1;visibility:inherit}.reveal .fragment.strike.visible{text-decoration:line-through}.reveal .fragment.fade-up{transform:translate(0,40px)}.reveal .fragment.fade-up.visible{transform:translate(0,0)}.reveal .fragment.fade-down{transform:translate(0,-40px)}.reveal .fragment.fade-down.visible{transform:translate(0,0)}.reveal .fragment.fade-right{transform:translate(-40px,0)}.reveal .fragment.fade-right.visible{transform:translate(0,0)}.reveal .fragment.fade-left{transform:translate(40px,0)}.reveal .fragment.fade-left.visible{transform:translate(0,0)}.reveal .fragment.current-visible,.reveal .fragment.fade-in-then-out{opacity:0;visibility:hidden}.reveal .fragment.current-visible.current-fragment,.reveal .fragment.fade-in-then-out.current-fragment{opacity:1;visibility:inherit}.reveal .fragment.fade-in-then-semi-out{opacity:0;visibility:hidden}.reveal .fragment.fade-in-then-semi-out.visible{opacity:.5;visibility:inherit}.reveal .fragment.fade-in-then-semi-out.current-fragment{opacity:1;visibility:inherit}.reveal .fragment.highlight-blue,.reveal .fragment.highlight-current-blue,.reveal .fragment.highlight-current-green,.reveal .fragment.highlight-current-red,.reveal .fragment.highlight-green,.reveal .fragment.highlight-red{opacity:1;visibility:inherit}.reveal .fragment.highlight-red.visible{color:#ff2c2d}.reveal .fragment.highlight-green.visible{color:#17ff2e}.reveal .fragment.highlight-blue.visible{color:#1b91ff}.reveal .fragment.highlight-current-red.current-fragment{color:#ff2c2d}.reveal .fragment.highlight-current-green.current-fragment{color:#17ff2e}.reveal 
.fragment.highlight-current-blue.current-fragment{color:#1b91ff}.reveal:after{content:"";font-style:italic}.reveal iframe{z-index:1}.reveal a{position:relative}@keyframes bounce-right{0%,10%,25%,40%,50%{transform:translateX(0)}20%{transform:translateX(10px)}30%{transform:translateX(-5px)}}@keyframes bounce-left{0%,10%,25%,40%,50%{transform:translateX(0)}20%{transform:translateX(-10px)}30%{transform:translateX(5px)}}@keyframes bounce-down{0%,10%,25%,40%,50%{transform:translateY(0)}20%{transform:translateY(10px)}30%{transform:translateY(-5px)}}.reveal .controls{display:none;position:absolute;top:auto;bottom:12px;right:12px;left:auto;z-index:11;color:#000;pointer-events:none;font-size:10px}.reveal .controls button{position:absolute;padding:0;background-color:transparent;border:0;outline:0;cursor:pointer;color:currentColor;transform:scale(.9999);transition:color .2s ease,opacity .2s ease,transform .2s ease;z-index:2;pointer-events:auto;font-size:inherit;visibility:hidden;opacity:0;-webkit-appearance:none;-webkit-tap-highlight-color:transparent}.reveal .controls .controls-arrow:after,.reveal .controls .controls-arrow:before{content:"";position:absolute;top:0;left:0;width:2.6em;height:.5em;border-radius:.25em;background-color:currentColor;transition:all .15s ease,background-color .8s ease;transform-origin:.2em 50%;will-change:transform}.reveal .controls .controls-arrow{position:relative;width:3.6em;height:3.6em}.reveal .controls .controls-arrow:before{transform:translateX(.5em) translateY(1.55em) rotate(45deg)}.reveal .controls .controls-arrow:after{transform:translateX(.5em) translateY(1.55em) rotate(-45deg)}.reveal .controls .controls-arrow:hover:before{transform:translateX(.5em) translateY(1.55em) rotate(40deg)}.reveal .controls .controls-arrow:hover:after{transform:translateX(.5em) translateY(1.55em) rotate(-40deg)}.reveal .controls .controls-arrow:active:before{transform:translateX(.5em) translateY(1.55em) rotate(36deg)}.reveal .controls 
.controls-arrow:active:after{transform:translateX(.5em) translateY(1.55em) rotate(-36deg)}.reveal .controls .navigate-left{right:6.4em;bottom:3.2em;transform:translateX(-10px)}.reveal .controls .navigate-left.highlight{animation:bounce-left 2s 50 both ease-out}.reveal .controls .navigate-right{right:0;bottom:3.2em;transform:translateX(10px)}.reveal .controls .navigate-right .controls-arrow{transform:rotate(180deg)}.reveal .controls .navigate-right.highlight{animation:bounce-right 2s 50 both ease-out}.reveal .controls .navigate-up{right:3.2em;bottom:6.4em;transform:translateY(-10px)}.reveal .controls .navigate-up .controls-arrow{transform:rotate(90deg)}.reveal .controls .navigate-down{right:3.2em;bottom:-1.4em;padding-bottom:1.4em;transform:translateY(10px)}.reveal .controls .navigate-down .controls-arrow{transform:rotate(-90deg)}.reveal .controls .navigate-down.highlight{animation:bounce-down 2s 50 both ease-out}.reveal .controls[data-controls-back-arrows=faded] .navigate-up.enabled{opacity:.3}.reveal .controls[data-controls-back-arrows=faded] .navigate-up.enabled:hover{opacity:1}.reveal .controls[data-controls-back-arrows=hidden] .navigate-up.enabled{opacity:0;visibility:hidden}.reveal .controls .enabled{visibility:visible;opacity:.9;cursor:pointer;transform:none}.reveal .controls .enabled.fragmented{opacity:.5}.reveal .controls .enabled.fragmented:hover,.reveal .controls .enabled:hover{opacity:1}.reveal:not(.rtl) .controls[data-controls-back-arrows=faded] .navigate-left.enabled{opacity:.3}.reveal:not(.rtl) .controls[data-controls-back-arrows=faded] .navigate-left.enabled:hover{opacity:1}.reveal:not(.rtl) .controls[data-controls-back-arrows=hidden] .navigate-left.enabled{opacity:0;visibility:hidden}.reveal.rtl .controls[data-controls-back-arrows=faded] .navigate-right.enabled{opacity:.3}.reveal.rtl .controls[data-controls-back-arrows=faded] .navigate-right.enabled:hover{opacity:1}.reveal.rtl .controls[data-controls-back-arrows=hidden] 
.navigate-right.enabled{opacity:0;visibility:hidden}.reveal[data-navigation-mode=linear].has-horizontal-slides .navigate-down,.reveal[data-navigation-mode=linear].has-horizontal-slides .navigate-up{display:none}.reveal:not(.has-vertical-slides) .controls .navigate-left,.reveal[data-navigation-mode=linear].has-horizontal-slides .navigate-left{bottom:1.4em;right:5.5em}.reveal:not(.has-vertical-slides) .controls .navigate-right,.reveal[data-navigation-mode=linear].has-horizontal-slides .navigate-right{bottom:1.4em;right:.5em}.reveal:not(.has-horizontal-slides) .controls .navigate-up{right:1.4em;bottom:5em}.reveal:not(.has-horizontal-slides) .controls .navigate-down{right:1.4em;bottom:.5em}.reveal.has-dark-background .controls{color:#fff}.reveal.has-light-background .controls{color:#000}.reveal.no-hover .controls .controls-arrow:active:before,.reveal.no-hover .controls .controls-arrow:hover:before{transform:translateX(.5em) translateY(1.55em) rotate(45deg)}.reveal.no-hover .controls .controls-arrow:active:after,.reveal.no-hover .controls .controls-arrow:hover:after{transform:translateX(.5em) translateY(1.55em) rotate(-45deg)}@media screen and (min-width:500px){.reveal .controls[data-controls-layout=edges]{top:0;right:0;bottom:0;left:0}.reveal .controls[data-controls-layout=edges] .navigate-down,.reveal .controls[data-controls-layout=edges] .navigate-left,.reveal .controls[data-controls-layout=edges] .navigate-right,.reveal .controls[data-controls-layout=edges] .navigate-up{bottom:auto;right:auto}.reveal .controls[data-controls-layout=edges] .navigate-left{top:50%;left:.8em;margin-top:-1.8em}.reveal .controls[data-controls-layout=edges] .navigate-right{top:50%;right:.8em;margin-top:-1.8em}.reveal .controls[data-controls-layout=edges] .navigate-up{top:.8em;left:50%;margin-left:-1.8em}.reveal .controls[data-controls-layout=edges] .navigate-down{bottom:-.3em;left:50%;margin-left:-1.8em}}.reveal 
.progress{position:absolute;display:none;height:3px;width:100%;bottom:0;left:0;z-index:10;background-color:rgba(0,0,0,.2);color:#fff}.reveal .progress:after{content:"";display:block;position:absolute;height:10px;width:100%;top:-10px}.reveal .progress span{display:block;height:100%;width:100%;background-color:currentColor;transition:transform .8s cubic-bezier(.26,.86,.44,.985);transform-origin:0 0;transform:scaleX(0)}.reveal .slide-number{position:absolute;display:block;right:8px;bottom:8px;z-index:31;font-family:Helvetica,sans-serif;font-size:12px;line-height:1;color:#fff;background-color:rgba(0,0,0,.4);padding:5px}.reveal .slide-number a{color:currentColor}.reveal .slide-number-delimiter{margin:0 3px}.reveal{position:relative;width:100%;height:100%;overflow:hidden;touch-action:pinch-zoom}.reveal.embedded{touch-action:pan-y}.reveal .slides{position:absolute;width:100%;height:100%;top:0;right:0;bottom:0;left:0;margin:auto;pointer-events:none;overflow:visible;z-index:1;text-align:center;perspective:600px;perspective-origin:50% 40%}.reveal .slides>section{perspective:600px}.reveal .slides>section,.reveal .slides>section>section{display:none;position:absolute;width:100%;pointer-events:auto;z-index:10;transform-style:flat;transition:transform-origin .8s cubic-bezier(.26,.86,.44,.985),transform .8s cubic-bezier(.26,.86,.44,.985),visibility .8s cubic-bezier(.26,.86,.44,.985),opacity .8s cubic-bezier(.26,.86,.44,.985)}.reveal[data-transition-speed=fast] .slides section{transition-duration:.4s}.reveal[data-transition-speed=slow] .slides section{transition-duration:1.2s}.reveal .slides section[data-transition-speed=fast]{transition-duration:.4s}.reveal .slides section[data-transition-speed=slow]{transition-duration:1.2s}.reveal .slides>section.stack{padding-top:0;padding-bottom:0;pointer-events:none;height:100%}.reveal .slides>section.present,.reveal .slides>section>section.present{display:block;z-index:11;opacity:1}.reveal .slides>section:empty,.reveal 
.slides>section>section:empty,.reveal .slides>section>section[data-background-interactive],.reveal .slides>section[data-background-interactive]{pointer-events:none}.reveal.center,.reveal.center .slides,.reveal.center .slides section{min-height:0!important}.reveal .slides>section:not(.present),.reveal .slides>section>section:not(.present){pointer-events:none}.reveal.overview .slides>section,.reveal.overview .slides>section>section{pointer-events:auto}.reveal .slides>section.future,.reveal .slides>section.future>section,.reveal .slides>section.past,.reveal .slides>section.past>section,.reveal .slides>section>section.future,.reveal .slides>section>section.past{opacity:0}.reveal .slides>section[data-transition=slide].past,.reveal .slides>section[data-transition~=slide-out].past,.reveal.slide .slides>section:not([data-transition]).past{transform:translate(-150%,0)}.reveal .slides>section[data-transition=slide].future,.reveal .slides>section[data-transition~=slide-in].future,.reveal.slide .slides>section:not([data-transition]).future{transform:translate(150%,0)}.reveal .slides>section>section[data-transition=slide].past,.reveal .slides>section>section[data-transition~=slide-out].past,.reveal.slide .slides>section>section:not([data-transition]).past{transform:translate(0,-150%)}.reveal .slides>section>section[data-transition=slide].future,.reveal .slides>section>section[data-transition~=slide-in].future,.reveal.slide .slides>section>section:not([data-transition]).future{transform:translate(0,150%)}.reveal .slides>section[data-transition=linear].past,.reveal .slides>section[data-transition~=linear-out].past,.reveal.linear .slides>section:not([data-transition]).past{transform:translate(-150%,0)}.reveal .slides>section[data-transition=linear].future,.reveal .slides>section[data-transition~=linear-in].future,.reveal.linear .slides>section:not([data-transition]).future{transform:translate(150%,0)}.reveal .slides>section>section[data-transition=linear].past,.reveal 
.slides>section>section[data-transition~=linear-out].past,.reveal.linear .slides>section>section:not([data-transition]).past{transform:translate(0,-150%)}.reveal .slides>section>section[data-transition=linear].future,.reveal .slides>section>section[data-transition~=linear-in].future,.reveal.linear .slides>section>section:not([data-transition]).future{transform:translate(0,150%)}.reveal .slides section[data-transition=default].stack,.reveal.default .slides section.stack{transform-style:preserve-3d}.reveal .slides>section[data-transition=default].past,.reveal .slides>section[data-transition~=default-out].past,.reveal.default .slides>section:not([data-transition]).past{transform:translate3d(-100%,0,0) rotateY(-90deg) translate3d(-100%,0,0)}.reveal .slides>section[data-transition=default].future,.reveal .slides>section[data-transition~=default-in].future,.reveal.default .slides>section:not([data-transition]).future{transform:translate3d(100%,0,0) rotateY(90deg) translate3d(100%,0,0)}.reveal .slides>section>section[data-transition=default].past,.reveal .slides>section>section[data-transition~=default-out].past,.reveal.default .slides>section>section:not([data-transition]).past{transform:translate3d(0,-300px,0) rotateX(70deg) translate3d(0,-300px,0)}.reveal .slides>section>section[data-transition=default].future,.reveal .slides>section>section[data-transition~=default-in].future,.reveal.default .slides>section>section:not([data-transition]).future{transform:translate3d(0,300px,0) rotateX(-70deg) translate3d(0,300px,0)}.reveal .slides section[data-transition=convex].stack,.reveal.convex .slides section.stack{transform-style:preserve-3d}.reveal .slides>section[data-transition=convex].past,.reveal .slides>section[data-transition~=convex-out].past,.reveal.convex .slides>section:not([data-transition]).past{transform:translate3d(-100%,0,0) rotateY(-90deg) translate3d(-100%,0,0)}.reveal .slides>section[data-transition=convex].future,.reveal 
.slides>section[data-transition~=convex-in].future,.reveal.convex .slides>section:not([data-transition]).future{transform:translate3d(100%,0,0) rotateY(90deg) translate3d(100%,0,0)}.reveal .slides>section>section[data-transition=convex].past,.reveal .slides>section>section[data-transition~=convex-out].past,.reveal.convex .slides>section>section:not([data-transition]).past{transform:translate3d(0,-300px,0) rotateX(70deg) translate3d(0,-300px,0)}.reveal .slides>section>section[data-transition=convex].future,.reveal .slides>section>section[data-transition~=convex-in].future,.reveal.convex .slides>section>section:not([data-transition]).future{transform:translate3d(0,300px,0) rotateX(-70deg) translate3d(0,300px,0)}.reveal .slides section[data-transition=concave].stack,.reveal.concave .slides section.stack{transform-style:preserve-3d}.reveal .slides>section[data-transition=concave].past,.reveal .slides>section[data-transition~=concave-out].past,.reveal.concave .slides>section:not([data-transition]).past{transform:translate3d(-100%,0,0) rotateY(90deg) translate3d(-100%,0,0)}.reveal .slides>section[data-transition=concave].future,.reveal .slides>section[data-transition~=concave-in].future,.reveal.concave .slides>section:not([data-transition]).future{transform:translate3d(100%,0,0) rotateY(-90deg) translate3d(100%,0,0)}.reveal .slides>section>section[data-transition=concave].past,.reveal .slides>section>section[data-transition~=concave-out].past,.reveal.concave .slides>section>section:not([data-transition]).past{transform:translate3d(0,-80%,0) rotateX(-70deg) translate3d(0,-80%,0)}.reveal .slides>section>section[data-transition=concave].future,.reveal .slides>section>section[data-transition~=concave-in].future,.reveal.concave .slides>section>section:not([data-transition]).future{transform:translate3d(0,80%,0) rotateX(70deg) translate3d(0,80%,0)}.reveal .slides section[data-transition=zoom],.reveal.zoom .slides 
section:not([data-transition]){transition-timing-function:ease}.reveal .slides>section[data-transition=zoom].past,.reveal .slides>section[data-transition~=zoom-out].past,.reveal.zoom .slides>section:not([data-transition]).past{visibility:hidden;transform:scale(16)}.reveal .slides>section[data-transition=zoom].future,.reveal .slides>section[data-transition~=zoom-in].future,.reveal.zoom .slides>section:not([data-transition]).future{visibility:hidden;transform:scale(.2)}.reveal .slides>section>section[data-transition=zoom].past,.reveal .slides>section>section[data-transition~=zoom-out].past,.reveal.zoom .slides>section>section:not([data-transition]).past{transform:scale(16)}.reveal .slides>section>section[data-transition=zoom].future,.reveal .slides>section>section[data-transition~=zoom-in].future,.reveal.zoom .slides>section>section:not([data-transition]).future{transform:scale(.2)}.reveal.cube .slides{perspective:1300px}.reveal.cube .slides section{padding:30px;min-height:700px;backface-visibility:hidden;box-sizing:border-box;transform-style:preserve-3d}.reveal.center.cube .slides section{min-height:0}.reveal.cube .slides section:not(.stack):before{content:"";position:absolute;display:block;width:100%;height:100%;left:0;top:0;background:rgba(0,0,0,.1);border-radius:4px;transform:translateZ(-20px)}.reveal.cube .slides section:not(.stack):after{content:"";position:absolute;display:block;width:90%;height:30px;left:5%;bottom:0;background:0 0;z-index:1;border-radius:4px;box-shadow:0 95px 25px rgba(0,0,0,.2);transform:translateZ(-90px) rotateX(65deg)}.reveal.cube .slides>section.stack{padding:0;background:0 0}.reveal.cube .slides>section.past{transform-origin:100% 0;transform:translate3d(-100%,0,0) rotateY(-90deg)}.reveal.cube .slides>section.future{transform-origin:0 0;transform:translate3d(100%,0,0) rotateY(90deg)}.reveal.cube .slides>section>section.past{transform-origin:0 100%;transform:translate3d(0,-100%,0) rotateX(90deg)}.reveal.cube 
.slides>section>section.future{transform-origin:0 0;transform:translate3d(0,100%,0) rotateX(-90deg)}.reveal.page .slides{perspective-origin:0 50%;perspective:3000px}.reveal.page .slides section{padding:30px;min-height:700px;box-sizing:border-box;transform-style:preserve-3d}.reveal.page .slides section.past{z-index:12}.reveal.page .slides section:not(.stack):before{content:"";position:absolute;display:block;width:100%;height:100%;left:0;top:0;background:rgba(0,0,0,.1);transform:translateZ(-20px)}.reveal.page .slides section:not(.stack):after{content:"";position:absolute;display:block;width:90%;height:30px;left:5%;bottom:0;background:0 0;z-index:1;border-radius:4px;box-shadow:0 95px 25px rgba(0,0,0,.2);-webkit-transform:translateZ(-90px) rotateX(65deg)}.reveal.page .slides>section.stack{padding:0;background:0 0}.reveal.page .slides>section.past{transform-origin:0 0;transform:translate3d(-40%,0,0) rotateY(-80deg)}.reveal.page .slides>section.future{transform-origin:100% 0;transform:translate3d(0,0,0)}.reveal.page .slides>section>section.past{transform-origin:0 0;transform:translate3d(0,-40%,0) rotateX(80deg)}.reveal.page .slides>section>section.future{transform-origin:0 100%;transform:translate3d(0,0,0)}.reveal .slides section[data-transition=fade],.reveal.fade .slides section:not([data-transition]),.reveal.fade .slides>section>section:not([data-transition]){transform:none;transition:opacity .5s}.reveal.fade.overview .slides section,.reveal.fade.overview .slides>section>section{transition:none}.reveal .slides section[data-transition=none],.reveal.none .slides section:not([data-transition]){transform:none;transition:none}.reveal .pause-overlay{position:absolute;top:0;left:0;width:100%;height:100%;background:#000;visibility:hidden;opacity:0;z-index:100;transition:all 1s ease}.reveal .pause-overlay .resume-button{position:absolute;bottom:20px;right:20px;color:#ccc;border-radius:2px;padding:6px 14px;border:2px solid #ccc;font-size:16px;background:0 
0;cursor:pointer}.reveal .pause-overlay .resume-button:hover{color:#fff;border-color:#fff}.reveal.paused .pause-overlay{visibility:visible;opacity:1}.reveal .no-transition,.reveal .no-transition *,.reveal .slides.disable-slide-transitions section{transition:none!important}.reveal .slides.disable-slide-transitions section{transform:none!important}.reveal .backgrounds{position:absolute;width:100%;height:100%;top:0;left:0;perspective:600px}.reveal .slide-background{display:none;position:absolute;width:100%;height:100%;opacity:0;visibility:hidden;overflow:hidden;background-color:rgba(0,0,0,0);transition:all .8s cubic-bezier(.26,.86,.44,.985)}.reveal .slide-background-content{position:absolute;width:100%;height:100%;background-position:50% 50%;background-repeat:no-repeat;background-size:cover}.reveal .slide-background.stack{display:block}.reveal .slide-background.present{opacity:1;visibility:visible;z-index:2}.print-pdf .reveal .slide-background{opacity:1!important;visibility:visible!important}.reveal .slide-background video{position:absolute;width:100%;height:100%;max-width:none;max-height:none;top:0;left:0;object-fit:cover}.reveal .slide-background[data-background-size=contain] video{object-fit:contain}.reveal>.backgrounds .slide-background[data-background-transition=none],.reveal[data-background-transition=none]>.backgrounds .slide-background:not([data-background-transition]){transition:none}.reveal>.backgrounds .slide-background[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background:not([data-background-transition]){opacity:1}.reveal>.backgrounds .slide-background.past[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background.past:not([data-background-transition]){transform:translate(-100%,0)}.reveal>.backgrounds .slide-background.future[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds 
.slide-background.future:not([data-background-transition]){transform:translate(100%,0)}.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]){transform:translate(0,-100%)}.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]){transform:translate(0,100%)}.reveal>.backgrounds .slide-background.past[data-background-transition=convex],.reveal[data-background-transition=convex]>.backgrounds .slide-background.past:not([data-background-transition]){opacity:0;transform:translate3d(-100%,0,0) rotateY(-90deg) translate3d(-100%,0,0)}.reveal>.backgrounds .slide-background.future[data-background-transition=convex],.reveal[data-background-transition=convex]>.backgrounds .slide-background.future:not([data-background-transition]){opacity:0;transform:translate3d(100%,0,0) rotateY(90deg) translate3d(100%,0,0)}.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=convex],.reveal[data-background-transition=convex]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]){opacity:0;transform:translate3d(0,-100%,0) rotateX(90deg) translate3d(0,-100%,0)}.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=convex],.reveal[data-background-transition=convex]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]){opacity:0;transform:translate3d(0,100%,0) rotateX(-90deg) translate3d(0,100%,0)}.reveal>.backgrounds .slide-background.past[data-background-transition=concave],.reveal[data-background-transition=concave]>.backgrounds 
.slide-background.past:not([data-background-transition]){opacity:0;transform:translate3d(-100%,0,0) rotateY(90deg) translate3d(-100%,0,0)}.reveal>.backgrounds .slide-background.future[data-background-transition=concave],.reveal[data-background-transition=concave]>.backgrounds .slide-background.future:not([data-background-transition]){opacity:0;transform:translate3d(100%,0,0) rotateY(-90deg) translate3d(100%,0,0)}.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=concave],.reveal[data-background-transition=concave]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]){opacity:0;transform:translate3d(0,-100%,0) rotateX(-90deg) translate3d(0,-100%,0)}.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=concave],.reveal[data-background-transition=concave]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]){opacity:0;transform:translate3d(0,100%,0) rotateX(90deg) translate3d(0,100%,0)}.reveal>.backgrounds .slide-background[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds .slide-background:not([data-background-transition]){transition-timing-function:ease}.reveal>.backgrounds .slide-background.past[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds .slide-background.past:not([data-background-transition]){opacity:0;visibility:hidden;transform:scale(16)}.reveal>.backgrounds .slide-background.future[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds .slide-background.future:not([data-background-transition]){opacity:0;visibility:hidden;transform:scale(.2)}.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds 
.slide-background>.slide-background.past:not([data-background-transition]){opacity:0;visibility:hidden;transform:scale(16)}.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]){opacity:0;visibility:hidden;transform:scale(.2)}.reveal[data-transition-speed=fast]>.backgrounds .slide-background{transition-duration:.4s}.reveal[data-transition-speed=slow]>.backgrounds .slide-background{transition-duration:1.2s}.reveal [data-auto-animate-target^=unmatched]{will-change:opacity}.reveal section[data-auto-animate]:not(.stack):not([data-auto-animate=running]) [data-auto-animate-target^=unmatched]{opacity:0}.reveal.overview{perspective-origin:50% 50%;perspective:700px}.reveal.overview .slides{-moz-transform-style:preserve-3d}.reveal.overview .slides section{height:100%;top:0!important;opacity:1!important;overflow:hidden;visibility:visible!important;cursor:pointer;box-sizing:border-box}.reveal.overview .slides section.present,.reveal.overview .slides section:hover{outline:10px solid rgba(150,150,150,.4);outline-offset:10px}.reveal.overview .slides section .fragment{opacity:1;transition:none}.reveal.overview .slides section:after,.reveal.overview .slides section:before{display:none!important}.reveal.overview .slides>section.stack{padding:0;top:0!important;background:0 0;outline:0;overflow:visible}.reveal.overview .backgrounds{perspective:inherit;-moz-transform-style:preserve-3d}.reveal.overview .backgrounds .slide-background{opacity:1;visibility:visible;outline:10px solid rgba(150,150,150,.1);outline-offset:10px}.reveal.overview .backgrounds .slide-background.stack{overflow:visible}.reveal.overview .slides section,.reveal.overview-deactivating .slides section{transition:none}.reveal.overview .backgrounds .slide-background,.reveal.overview-deactivating .backgrounds .slide-background{transition:none}.reveal.rtl 
.slides,.reveal.rtl .slides h1,.reveal.rtl .slides h2,.reveal.rtl .slides h3,.reveal.rtl .slides h4,.reveal.rtl .slides h5,.reveal.rtl .slides h6{direction:rtl;font-family:sans-serif}.reveal.rtl code,.reveal.rtl pre{direction:ltr}.reveal.rtl ol,.reveal.rtl ul{text-align:right}.reveal.rtl .progress span{transform-origin:100% 0}.reveal.has-parallax-background .backgrounds{transition:all .8s ease}.reveal.has-parallax-background[data-transition-speed=fast] .backgrounds{transition-duration:.4s}.reveal.has-parallax-background[data-transition-speed=slow] .backgrounds{transition-duration:1.2s}.reveal>.overlay{position:absolute;top:0;left:0;width:100%;height:100%;z-index:1000;background:rgba(0,0,0,.9);transition:all .3s ease}.reveal>.overlay .spinner{position:absolute;display:block;top:50%;left:50%;width:32px;height:32px;margin:-16px 0 0 -16px;z-index:10;background-image:url(data:image/gif;base64,R0lGODlhIAAgAPMAAJmZmf%2F%2F%2F6%2Bvr8nJybW1tcDAwOjo6Nvb26ioqKOjo7Ozs%2FLy8vz8%2FAAAAAAAAAAAACH%2FC05FVFNDQVBFMi4wAwEAAAAh%2FhpDcmVhdGVkIHdpdGggYWpheGxvYWQuaW5mbwAh%2BQQJCgAAACwAAAAAIAAgAAAE5xDISWlhperN52JLhSSdRgwVo1ICQZRUsiwHpTJT4iowNS8vyW2icCF6k8HMMBkCEDskxTBDAZwuAkkqIfxIQyhBQBFvAQSDITM5VDW6XNE4KagNh6Bgwe60smQUB3d4Rz1ZBApnFASDd0hihh12BkE9kjAJVlycXIg7CQIFA6SlnJ87paqbSKiKoqusnbMdmDC2tXQlkUhziYtyWTxIfy6BE8WJt5YJvpJivxNaGmLHT0VnOgSYf0dZXS7APdpB309RnHOG5gDqXGLDaC457D1zZ%2FV%2FnmOM82XiHRLYKhKP1oZmADdEAAAh%2BQQJCgAAACwAAAAAIAAgAAAE6hDISWlZpOrNp1lGNRSdRpDUolIGw5RUYhhHukqFu8DsrEyqnWThGvAmhVlteBvojpTDDBUEIFwMFBRAmBkSgOrBFZogCASwBDEY%2FCZSg7GSE0gSCjQBMVG023xWBhklAnoEdhQEfyNqMIcKjhRsjEdnezB%2BA4k8gTwJhFuiW4dokXiloUepBAp5qaKpp6%2BHo7aWW54wl7obvEe0kRuoplCGepwSx2jJvqHEmGt6whJpGpfJCHmOoNHKaHx61WiSR92E4lbFoq%2BB6QDtuetcaBPnW6%2BO7wDHpIiK9SaVK5GgV543tzjgGcghAgAh%2BQQJCgAAACwAAAAAIAAgAAAE7hDISSkxpOrN5zFHNWRdhSiVoVLHspRUMoyUakyEe8PTPCATW9A14E0UvuAKMNAZKYUZCiBMuBakSQKG8G2FzUWox2AUtAQFcBKlVQoLgQReZhQlCIJesQXI5B0CBnUMOxMCenoCfTCEWBsJColTMANldx15BGs8B5wlCZ9Po6OJkwmRpnqkqnuSrayqfKmqpLajoiW5HJq7FL1Gr2mMMcK
UMIiJgIemy7xZtJsTmsM4xHiKv5KMCXqfyUCJEonXPN2rAOIAmsfB3uPoAK%2B%2BG%2Bw48edZPK%2BM6hLJpQg484enXIdQFSS1u6UhksENEQAAIfkECQoAAAAsAAAAACAAIAAABOcQyEmpGKLqzWcZRVUQnZYg1aBSh2GUVEIQ2aQOE%2BG%2BcD4ntpWkZQj1JIiZIogDFFyHI0UxQwFugMSOFIPJftfVAEoZLBbcLEFhlQiqGp1Vd140AUklUN3eCA51C1EWMzMCezCBBmkxVIVHBWd3HHl9JQOIJSdSnJ0TDKChCwUJjoWMPaGqDKannasMo6WnM562R5YluZRwur0wpgqZE7NKUm%2BFNRPIhjBJxKZteWuIBMN4zRMIVIhffcgojwCF117i4nlLnY5ztRLsnOk%2BaV%2BoJY7V7m76PdkS4trKcdg0Zc0tTcKkRAAAIfkECQoAAAAsAAAAACAAIAAABO4QyEkpKqjqzScpRaVkXZWQEximw1BSCUEIlDohrft6cpKCk5xid5MNJTaAIkekKGQkWyKHkvhKsR7ARmitkAYDYRIbUQRQjWBwJRzChi9CRlBcY1UN4g0%2FVNB0AlcvcAYHRyZPdEQFYV8ccwR5HWxEJ02YmRMLnJ1xCYp0Y5idpQuhopmmC2KgojKasUQDk5BNAwwMOh2RtRq5uQuPZKGIJQIGwAwGf6I0JXMpC8C7kXWDBINFMxS4DKMAWVWAGYsAdNqW5uaRxkSKJOZKaU3tPOBZ4DuK2LATgJhkPJMgTwKCdFjyPHEnKxFCDhEAACH5BAkKAAAALAAAAAAgACAAAATzEMhJaVKp6s2nIkolIJ2WkBShpkVRWqqQrhLSEu9MZJKK9y1ZrqYK9WiClmvoUaF8gIQSNeF1Er4MNFn4SRSDARWroAIETg1iVwuHjYB1kYc1mwruwXKC9gmsJXliGxc%2BXiUCby9ydh1sOSdMkpMTBpaXBzsfhoc5l58Gm5yToAaZhaOUqjkDgCWNHAULCwOLaTmzswadEqggQwgHuQsHIoZCHQMMQgQGubVEcxOPFAcMDAYUA85eWARmfSRQCdcMe0zeP1AAygwLlJtPNAAL19DARdPzBOWSm1brJBi45soRAWQAAkrQIykShQ9wVhHCwCQCACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiRMDjI0Fd30%2FiI2UA5GSS5UDj2l6NoqgOgN4gksEBgYFf0FDqKgHnyZ9OX8HrgYHdHpcHQULXAS2qKpENRg7eAMLC7kTBaixUYFkKAzWAAnLC7FLVxLWDBLKCwaKTULgEwbLA4hJtOkSBNqITT3xEgfLpBtzE%2FjiuL04RGEBgwWhShRgQExHBAAh%2BQQJCgAAACwAAAAAIAAgAAAE7xDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfZiCqGk5dTESJeaOAlClzsJsqwiJwiqnFrb2nS9kmIcgEsjQydLiIlHehhpejaIjzh9eomSjZR%2BipslWIRLAgMDOR2DOqKogTB9pCUJBagDBXR6XB0EBkIIsaRsGGMMAxoDBgYHTKJiUYEGDAzHC9EACcUGkIgFzgwZ0QsSBcXHiQvOwgDdEwfFs0sDzt4S6BK4xYjkDOzn0unFeBzOBijIm1Dgmg5YFQwsCMjp1oJ8LyIAACH5BAkKAAAALAAAAAAgACAAAATwEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInC
KqcWtvadL2SYhyASyNDJ0uIiUd6GGl6NoiPOH16iZKNlH6KmyWFOggHhEEvAwwMA0N9GBsEC6amhnVcEwavDAazGwIDaH1ipaYLBUTCGgQDA8NdHz0FpqgTBwsLqAbWAAnIA4FWKdMLGdYGEgraigbT0OITBcg5QwPT4xLrROZL6AuQAPUS7bxLpoWidY0JtxLHKhwwMJBTHgPKdEQAACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GAULDJCRiXo1CpGXDJOUjY%2BYip9DhToJA4RBLwMLCwVDfRgbBAaqqoZ1XBMHswsHtxtFaH1iqaoGNgAIxRpbFAgfPQSqpbgGBqUD1wBXeCYp1AYZ19JJOYgH1KwA4UBvQwXUBxPqVD9L3sbp2BNk2xvvFPJd%2BMFCN6HAAIKgNggY0KtEBAAh%2BQQJCgAAACwAAAAAIAAgAAAE6BDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfYIDMaAFdTESJeaEDAIMxYFqrOUaNW4E4ObYcCXaiBVEgULe0NJaxxtYksjh2NLkZISgDgJhHthkpU4mW6blRiYmZOlh4JWkDqILwUGBnE6TYEbCgevr0N1gH4At7gHiRpFaLNrrq8HNgAJA70AWxQIH1%2BvsYMDAzZQPC9VCNkDWUhGkuE5PxJNwiUK4UfLzOlD4WvzAHaoG9nxPi5d%2BjYUqfAhhykOFwJWiAAAIfkECQoAAAAsAAAAACAAIAAABPAQyElpUqnqzaciSoVkXVUMFaFSwlpOCcMYlErAavhOMnNLNo8KsZsMZItJEIDIFSkLGQoQTNhIsFehRww2CQLKF0tYGKYSg%2BygsZIuNqJksKgbfgIGepNo2cIUB3V1B3IvNiBYNQaDSTtfhhx0CwVPI0UJe0%2Bbm4g5VgcGoqOcnjmjqDSdnhgEoamcsZuXO1aWQy8KAwOAuTYYGwi7w5h%2BKr0SJ8MFihpNbx%2B4Erq7BYBuzsdiH1jCAzoSfl0rVirNbRXlBBlLX%2BBP0XJLAPGzTkAuAOqb0WT5AH7OcdCm5B8TgRwSRKIHQtaLCwg1RAAAOwAAAAAAAAAAAA%3D%3D);visibility:visible;opacity:.6;transition:all .3s ease}.reveal>.overlay header{position:absolute;left:0;top:0;width:100%;padding:5px;z-index:2;box-sizing:border-box}.reveal>.overlay header a{display:inline-block;width:40px;height:40px;line-height:36px;padding:0 10px;float:right;opacity:.6;box-sizing:border-box}.reveal>.overlay header a:hover{opacity:1}.reveal>.overlay header a .icon{display:inline-block;width:20px;height:20px;background-position:50% 50%;background-size:100%;background-repeat:no-repeat}.reveal>.overlay header a.close 
.icon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAABkklEQVRYR8WX4VHDMAxG6wnoJrABZQPYBCaBTWAD2g1gE5gg6OOsXuxIlr40d81dfrSJ9V4c2VLK7spHuTJ/5wpM07QXuXc5X0opX2tEJcadjHuV80li/FgxTIEK/5QBCICBD6xEhSMGHgQPgBgLiYVAB1dpSqKDawxTohFw4JSEA3clzgIBPCURwE2JucBR7rhPJJv5OpJwDX+SfDjgx1wACQeJG1aChP9K/IMmdZ8DtESV1WyP3Bt4MwM6sj4NMxMYiqUWHQu4KYA/SYkIjOsm3BXYWMKFDwU2khjCQ4ELJUJ4SmClRArOCmSXGuKma0fYD5CbzHxFpCSGAhfAVSSUGDUk2BWZaff2g6GE15BsBQ9nwmpIGDiyHQddwNTMKkbZaf9fajXQca1EX44puJZUsnY0ObGmITE3GVLCbEhQUjGVt146j6oasWN+49Vph2w1pZ5EansNZqKBm1txbU57iRRcZ86RWMDdWtBJUHBHwoQPi1GV+JCbntmvok7iTX4/Up9mgyTc/FJYDTcndgH/AA5A/CHsyEkVAAAAAElFTkSuQmCC)}.reveal>.overlay header a.external .icon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAcElEQVRYR+2WSQoAIQwEzf8f7XiOMkUQxUPlGkM3hVmiQfQR9GYnH1SsAQlI4DiBqkCMoNb9y2e90IAEJPAcgdznU9+engMaeJ7Azh5Y1U67gAho4DqBqmB1buAf0MB1AlVBek83ZPkmJMGc1wAR+AAqod/B97TRpQAAAABJRU5ErkJggg==)}.reveal>.overlay .viewport{position:absolute;display:flex;top:50px;right:0;bottom:0;left:0}.reveal>.overlay.overlay-preview .viewport iframe{width:100%;height:100%;max-width:100%;max-height:100%;border:0;opacity:0;visibility:hidden;transition:all .3s ease}.reveal>.overlay.overlay-preview.loaded .viewport iframe{opacity:1;visibility:visible}.reveal>.overlay.overlay-preview.loaded .viewport-inner{position:absolute;z-index:-1;left:0;top:45%;width:100%;text-align:center;letter-spacing:normal}.reveal>.overlay.overlay-preview .x-frame-error{opacity:0;transition:opacity .3s ease .3s}.reveal>.overlay.overlay-preview.loaded .x-frame-error{opacity:1}.reveal>.overlay.overlay-preview.loaded .spinner{opacity:0;visibility:hidden;transform:scale(.2)}.reveal>.overlay.overlay-help .viewport{overflow:auto;color:#fff}.reveal>.overlay.overlay-help .viewport .viewport-inner{width:600px;margin:auto;padding:20px 20px 80px 20px;text-align:center;letter-spacing:normal}.reveal>.overlay.overlay-help .viewport .viewport-inner 
.title{font-size:20px}.reveal>.overlay.overlay-help .viewport .viewport-inner table{border:1px solid #fff;border-collapse:collapse;font-size:16px}.reveal>.overlay.overlay-help .viewport .viewport-inner table td,.reveal>.overlay.overlay-help .viewport .viewport-inner table th{width:200px;padding:14px;border:1px solid #fff;vertical-align:middle}.reveal>.overlay.overlay-help .viewport .viewport-inner table th{padding-top:20px;padding-bottom:20px}.reveal .playback{position:absolute;left:15px;bottom:20px;z-index:30;cursor:pointer;transition:all .4s ease;-webkit-tap-highlight-color:transparent}.reveal.overview .playback{opacity:0;visibility:hidden}.reveal .hljs{min-height:100%}.reveal .hljs table{margin:initial}.reveal .hljs-ln-code,.reveal .hljs-ln-numbers{padding:0;border:0}.reveal .hljs-ln-numbers{opacity:.6;padding-right:.75em;text-align:right;vertical-align:top}.reveal .hljs.has-highlights tr:not(.highlight-line){opacity:.4}.reveal .hljs:not(:first-child).fragment{position:absolute;top:0;left:0;width:100%;box-sizing:border-box}.reveal pre[data-auto-animate-target]{overflow:hidden}.reveal pre[data-auto-animate-target] code{height:100%}.reveal .roll{display:inline-block;line-height:1.2;overflow:hidden;vertical-align:top;perspective:400px;perspective-origin:50% 50%}.reveal .roll:hover{background:0 0;text-shadow:none}.reveal .roll span{display:block;position:relative;padding:0 2px;pointer-events:none;transition:all .4s ease;transform-origin:50% 0;transform-style:preserve-3d;backface-visibility:hidden}.reveal .roll:hover span{background:rgba(0,0,0,.5);transform:translate3d(0,0,-45px) rotateX(90deg)}.reveal .roll span:after{content:attr(data-title);display:block;position:absolute;left:0;top:0;padding:0 2px;backface-visibility:hidden;transform-origin:50% 0;transform:translate3d(0,110%,0) rotateX(-90deg)}.reveal aside.notes{display:none}.reveal .speaker-notes{display:none;position:absolute;width:33.3333333333%;height:100%;top:0;left:100%;padding:14px 18px 14px 
18px;z-index:1;font-size:18px;line-height:1.4;border:1px solid rgba(0,0,0,.05);color:#222;background-color:#f5f5f5;overflow:auto;box-sizing:border-box;text-align:left;font-family:Helvetica,sans-serif;-webkit-overflow-scrolling:touch}.reveal .speaker-notes .notes-placeholder{color:#ccc;font-style:italic}.reveal .speaker-notes:focus{outline:0}.reveal .speaker-notes:before{content:"Speaker notes";display:block;margin-bottom:10px;opacity:.5}.reveal.show-notes{max-width:75%;overflow:visible}.reveal.show-notes .speaker-notes{display:block}@media screen and (min-width:1600px){.reveal .speaker-notes{font-size:20px}}@media screen and (max-width:1024px){.reveal.show-notes{border-left:0;max-width:none;max-height:70%;max-height:70vh;overflow:visible}.reveal.show-notes .speaker-notes{top:100%;left:0;width:100%;height:30vh;border:0}}@media screen and (max-width:600px){.reveal.show-notes{max-height:60%;max-height:60vh}.reveal.show-notes .speaker-notes{top:100%;height:40vh}.reveal .speaker-notes{font-size:14px}}.reveal .jump-to-slide{position:absolute;top:15px;left:15px;z-index:30;font-size:32px;-webkit-tap-highlight-color:transparent}.reveal .jump-to-slide-input{background:0 0;padding:8px;font-size:inherit;color:currentColor;border:0}.reveal .jump-to-slide-input::placeholder{color:currentColor;opacity:.5}.reveal.has-dark-background .jump-to-slide-input{color:#fff}.reveal.has-light-background .jump-to-slide-input{color:#222}.reveal .jump-to-slide-input:focus{outline:0}.zoomed .reveal *,.zoomed .reveal :after,.zoomed .reveal :before{backface-visibility:visible!important}.zoomed .reveal .controls,.zoomed .reveal .progress{opacity:0}.zoomed .reveal .roll span{background:0 0}.zoomed .reveal .roll span:after{visibility:hidden}html.print-pdf *{-webkit-print-color-adjust:exact}html.print-pdf{width:100%;height:100%;overflow:visible}html.print-pdf body{margin:0 auto!important;border:0;padding:0;float:none!important;overflow:visible}html.print-pdf .nestedarrow,html.print-pdf .reveal 
.controls,html.print-pdf .reveal .playback,html.print-pdf .reveal .progress,html.print-pdf .reveal.overview,html.print-pdf .state-background{display:none!important}html.print-pdf .reveal pre code{overflow:hidden!important;font-family:Courier,"Courier New",monospace!important}html.print-pdf .reveal{width:auto!important;height:auto!important;overflow:hidden!important}html.print-pdf .reveal .slides{position:static;width:100%!important;height:auto!important;zoom:1!important;pointer-events:initial;left:auto;top:auto;margin:0!important;padding:0!important;overflow:visible;display:block;perspective:none;perspective-origin:50% 50%}html.print-pdf .reveal .slides .pdf-page{position:relative;overflow:hidden;z-index:1;page-break-after:always}html.print-pdf .reveal .slides section{visibility:visible!important;display:block!important;position:absolute!important;margin:0!important;padding:0!important;box-sizing:border-box!important;min-height:1px;opacity:1!important;transform-style:flat!important;transform:none!important}html.print-pdf .reveal section.stack{position:relative!important;margin:0!important;padding:0!important;page-break-after:avoid!important;height:auto!important;min-height:auto!important}html.print-pdf .reveal img{box-shadow:none}html.print-pdf .reveal .backgrounds{display:none}html.print-pdf .reveal .slide-background{display:block!important;position:absolute;top:0;left:0;width:100%;height:100%;z-index:auto!important}html.print-pdf .reveal.show-notes{max-width:none;max-height:none}html.print-pdf .reveal .speaker-notes-pdf{display:block;width:100%;height:auto;max-height:none;top:auto;right:auto;bottom:auto;left:auto;z-index:100}html.print-pdf .reveal .speaker-notes-pdf[data-layout=separate-page]{position:relative;color:inherit;background-color:transparent;padding:20px;page-break-after:always;border:0}html.print-pdf .reveal .slide-number-pdf{display:block;position:absolute;font-size:14px}html.print-pdf .aria-status{display:none}@media 
print{html:not(.print-pdf){overflow:visible;width:auto;height:auto}html:not(.print-pdf) body{margin:0;padding:0;overflow:visible}html:not(.print-pdf) .reveal{background:#fff;font-size:20pt}html:not(.print-pdf) .reveal .backgrounds,html:not(.print-pdf) .reveal .controls,html:not(.print-pdf) .reveal .progress,html:not(.print-pdf) .reveal .slide-number,html:not(.print-pdf) .reveal .state-background{display:none!important}html:not(.print-pdf) .reveal li,html:not(.print-pdf) .reveal p,html:not(.print-pdf) .reveal td{font-size:20pt!important;color:#000}html:not(.print-pdf) .reveal h1,html:not(.print-pdf) .reveal h2,html:not(.print-pdf) .reveal h3,html:not(.print-pdf) .reveal h4,html:not(.print-pdf) .reveal h5,html:not(.print-pdf) .reveal h6{color:#000!important;height:auto;line-height:normal;text-align:left;letter-spacing:normal}html:not(.print-pdf) .reveal h1{font-size:28pt!important}html:not(.print-pdf) .reveal h2{font-size:24pt!important}html:not(.print-pdf) .reveal h3{font-size:22pt!important}html:not(.print-pdf) .reveal h4{font-size:22pt!important;font-variant:small-caps}html:not(.print-pdf) .reveal h5{font-size:21pt!important}html:not(.print-pdf) .reveal h6{font-size:20pt!important;font-style:italic}html:not(.print-pdf) .reveal a:link,html:not(.print-pdf) .reveal a:visited{color:#000!important;font-weight:700;text-decoration:underline}html:not(.print-pdf) .reveal div,html:not(.print-pdf) .reveal ol,html:not(.print-pdf) .reveal p,html:not(.print-pdf) .reveal ul{visibility:visible;position:static;width:auto;height:auto;display:block;overflow:visible;margin:0;text-align:left!important}html:not(.print-pdf) .reveal pre,html:not(.print-pdf) .reveal table{margin-left:0;margin-right:0}html:not(.print-pdf) .reveal pre code{padding:20px}html:not(.print-pdf) .reveal blockquote{margin:20px 0}html:not(.print-pdf) .reveal 
.slides{position:static!important;width:auto!important;height:auto!important;left:0!important;top:0!important;margin-left:0!important;margin-top:0!important;padding:0!important;zoom:1!important;transform:none!important;overflow:visible!important;display:block!important;text-align:left!important;perspective:none;perspective-origin:50% 50%}html:not(.print-pdf) .reveal .slides section{visibility:visible!important;position:static!important;width:auto!important;height:auto!important;display:block!important;overflow:visible!important;left:0!important;top:0!important;margin-left:0!important;margin-top:0!important;padding:60px 20px!important;z-index:auto!important;opacity:1!important;page-break-after:always!important;transform-style:flat!important;transform:none!important;transition:none!important}html:not(.print-pdf) .reveal .slides section.stack{padding:0!important}html:not(.print-pdf) .reveal .slides section:last-of-type{page-break-after:avoid!important}html:not(.print-pdf) .reveal .slides section .fragment{opacity:1!important;visibility:visible!important;transform:none!important}html:not(.print-pdf) .reveal .r-fit-text{white-space:normal!important}html:not(.print-pdf) .reveal section img{display:block;margin:15px 0;background:#fff;border:1px solid #666;box-shadow:none}html:not(.print-pdf) .reveal section small{font-size:.8em}html:not(.print-pdf) .reveal .hljs{max-height:100%;white-space:pre-wrap;word-wrap:break-word;word-break:break-word;font-size:15pt}html:not(.print-pdf) .reveal .hljs .hljs-ln-numbers{white-space:nowrap}html:not(.print-pdf) .reveal .hljs td{font-size:inherit!important;color:inherit!important}} \ No newline at end of file diff --git a/revealjs/dist/reveal.esm.js b/revealjs/dist/reveal.esm.js new file mode 100644 index 0000000..be243b7 --- /dev/null +++ b/revealjs/dist/reveal.esm.js @@ -0,0 +1,9 @@ +/*! 
+* reveal.js 4.5.0 +* https://revealjs.com +* MIT licensed +* +* Copyright (C) 2011-2023 Hakim El Hattab, https://hakim.se +*/ +const e=(e,t)=>{for(let i in t)e[i]=t[i];return e},t=(e,t)=>Array.from(e.querySelectorAll(t)),i=(e,t,i)=>{i?e.classList.add(t):e.classList.remove(t)},n=e=>{if("string"==typeof e){if("null"===e)return null;if("true"===e)return!0;if("false"===e)return!1;if(e.match(/^-?[\d\.]+$/))return parseFloat(e)}return e},s=(e,t)=>{e.style.transform=t},a=(e,t)=>{let i=e.matches||e.matchesSelector||e.msMatchesSelector;return!(!i||!i.call(e,t))},r=(e,t)=>{if("function"==typeof e.closest)return e.closest(t);for(;e;){if(a(e,t))return e;e=e.parentNode}return null},o=(e,t,i,n="")=>{let s=e.querySelectorAll("."+i);for(let t=0;t{let t=document.createElement("style");return t.type="text/css",e&&e.length>0&&(t.styleSheet?t.styleSheet.cssText=e:t.appendChild(document.createTextNode(e))),document.head.appendChild(t),t},d=()=>{let e={};location.search.replace(/[A-Z0-9]+?=([\w\.%-]*)/gi,(t=>{e[t.split("=").shift()]=t.split("=").pop()}));for(let t in e){let i=e[t];e[t]=n(unescape(i))}return void 0!==e.dependencies&&delete e.dependencies,e},c=(e,t=0)=>{if(e){let i,n=e.style.height;return e.style.height="0px",e.parentNode.style.height="auto",i=t-e.parentNode.offsetHeight,e.style.height=n+"px",e.parentNode.style.removeProperty("height"),i}return t},h={mp4:"video/mp4",m4a:"video/mp4",ogv:"video/ogg",mpeg:"video/mpeg",webm:"video/webm"},u=navigator.userAgent,g=/(iphone|ipod|ipad|android)/gi.test(u)||"MacIntel"===navigator.platform&&navigator.maxTouchPoints>1;/chrome/i.test(u)&&/edge/i.test(u);const v=/android/gi.test(u);var p={};Object.defineProperty(p,"__esModule",{value:!0});var m=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{};return"string"==typeof e?x(t(document.querySelectorAll(e)),i):x([e],i)[0]}}("undefined"==typeof window?null:window);class 
b{constructor(e){this.Reveal=e,this.startEmbeddedIframe=this.startEmbeddedIframe.bind(this)}shouldPreload(e){let t=this.Reveal.getConfig().preloadIframes;return"boolean"!=typeof t&&(t=e.hasAttribute("data-preload")),t}load(e,i={}){e.style.display=this.Reveal.getConfig().display,t(e,"img[data-src], video[data-src], audio[data-src], iframe[data-src]").forEach((e=>{("IFRAME"!==e.tagName||this.shouldPreload(e))&&(e.setAttribute("src",e.getAttribute("data-src")),e.setAttribute("data-lazy-loaded",""),e.removeAttribute("data-src"))})),t(e,"video, audio").forEach((e=>{let i=0;t(e,"source[data-src]").forEach((e=>{e.setAttribute("src",e.getAttribute("data-src")),e.removeAttribute("data-src"),e.setAttribute("data-lazy-loaded",""),i+=1})),g&&"VIDEO"===e.tagName&&e.setAttribute("playsinline",""),i>0&&e.load()}));let n=e.slideBackgroundElement;if(n){n.style.display="block";let t=e.slideBackgroundContentElement,s=e.getAttribute("data-background-iframe");if(!1===n.hasAttribute("data-loaded")){n.setAttribute("data-loaded","true");let a=e.getAttribute("data-background-image"),r=e.getAttribute("data-background-video"),o=e.hasAttribute("data-background-video-loop"),l=e.hasAttribute("data-background-video-muted");if(a)/^data:/.test(a.trim())?t.style.backgroundImage=`url(${a.trim()})`:t.style.backgroundImage=a.split(",").map((e=>`url(${((e="")=>encodeURI(e).replace(/%5B/g,"[").replace(/%5D/g,"]").replace(/[!'()*]/g,(e=>`%${e.charCodeAt(0).toString(16).toUpperCase()}`)))(decodeURI(e.trim()))})`)).join(",");else if(r&&!this.Reveal.isSpeakerNotes()){let e=document.createElement("video");o&&e.setAttribute("loop",""),l&&(e.muted=!0),g&&(e.muted=!0,e.setAttribute("playsinline","")),r.split(",").forEach((t=>{let i=((e="")=>h[e.split(".").pop()])(t);e.innerHTML+=i?``:``})),t.appendChild(e)}else if(s&&!0!==i.excludeIframes){let 
e=document.createElement("iframe");e.setAttribute("allowfullscreen",""),e.setAttribute("mozallowfullscreen",""),e.setAttribute("webkitallowfullscreen",""),e.setAttribute("allow","autoplay"),e.setAttribute("data-src",s),e.style.width="100%",e.style.height="100%",e.style.maxHeight="100%",e.style.maxWidth="100%",t.appendChild(e)}}let a=t.querySelector("iframe[data-src]");a&&this.shouldPreload(n)&&!/autoplay=(1|true|yes)/gi.test(s)&&a.getAttribute("src")!==s&&a.setAttribute("src",s)}this.layout(e)}layout(e){Array.from(e.querySelectorAll(".r-fit-text")).forEach((e=>{f(e,{minSize:24,maxSize:.8*this.Reveal.getConfig().height,observeMutations:!1,observeWindow:!1})}))}unload(e){e.style.display="none";let i=this.Reveal.getSlideBackground(e);i&&(i.style.display="none",t(i,"iframe[src]").forEach((e=>{e.removeAttribute("src")}))),t(e,"video[data-lazy-loaded][src], audio[data-lazy-loaded][src], iframe[data-lazy-loaded][src]").forEach((e=>{e.setAttribute("data-src",e.getAttribute("src")),e.removeAttribute("src")})),t(e,"video[data-lazy-loaded] source[src], audio source[src]").forEach((e=>{e.setAttribute("data-src",e.getAttribute("src")),e.removeAttribute("src")}))}formatEmbeddedContent(){let e=(e,i,n)=>{t(this.Reveal.getSlidesElement(),"iframe["+e+'*="'+i+'"]').forEach((t=>{let i=t.getAttribute(e);i&&-1===i.indexOf(n)&&t.setAttribute(e,i+(/\?/.test(i)?"&":"?")+n)}))};e("src","youtube.com/embed/","enablejsapi=1"),e("data-src","youtube.com/embed/","enablejsapi=1"),e("src","player.vimeo.com/","api=1"),e("data-src","player.vimeo.com/","api=1")}startEmbeddedContent(e){e&&!this.Reveal.isSpeakerNotes()&&(t(e,'img[src$=".gif"]').forEach((e=>{e.setAttribute("src",e.getAttribute("src"))})),t(e,"video, audio").forEach((e=>{if(r(e,".fragment")&&!r(e,".fragment.visible"))return;let t=this.Reveal.getConfig().autoPlayMedia;if("boolean"!=typeof t&&(t=e.hasAttribute("data-autoplay")||!!r(e,".slide-background")),t&&"function"==typeof 
e.play)if(e.readyState>1)this.startEmbeddedMedia({target:e});else if(g){let t=e.play();t&&"function"==typeof t.catch&&!1===e.controls&&t.catch((()=>{e.controls=!0,e.addEventListener("play",(()=>{e.controls=!1}))}))}else e.removeEventListener("loadeddata",this.startEmbeddedMedia),e.addEventListener("loadeddata",this.startEmbeddedMedia)})),t(e,"iframe[src]").forEach((e=>{r(e,".fragment")&&!r(e,".fragment.visible")||this.startEmbeddedIframe({target:e})})),t(e,"iframe[data-src]").forEach((e=>{r(e,".fragment")&&!r(e,".fragment.visible")||e.getAttribute("src")!==e.getAttribute("data-src")&&(e.removeEventListener("load",this.startEmbeddedIframe),e.addEventListener("load",this.startEmbeddedIframe),e.setAttribute("src",e.getAttribute("data-src")))})))}startEmbeddedMedia(e){let t=!!r(e.target,"html"),i=!!r(e.target,".present");t&&i&&(e.target.currentTime=0,e.target.play()),e.target.removeEventListener("loadeddata",this.startEmbeddedMedia)}startEmbeddedIframe(e){let t=e.target;if(t&&t.contentWindow){let i=!!r(e.target,"html"),n=!!r(e.target,".present");if(i&&n){let e=this.Reveal.getConfig().autoPlayMedia;"boolean"!=typeof e&&(e=t.hasAttribute("data-autoplay")||!!r(t,".slide-background")),/youtube\.com\/embed\//.test(t.getAttribute("src"))&&e?t.contentWindow.postMessage('{"event":"command","func":"playVideo","args":""}',"*"):/player\.vimeo\.com\//.test(t.getAttribute("src"))&&e?t.contentWindow.postMessage('{"method":"play"}',"*"):t.contentWindow.postMessage("slide:start","*")}}}stopEmbeddedContent(i,n={}){n=e({unloadIframes:!0},n),i&&i.parentNode&&(t(i,"video, audio").forEach((e=>{e.hasAttribute("data-ignore")||"function"!=typeof e.pause||(e.setAttribute("data-paused-by-reveal",""),e.pause())})),t(i,"iframe").forEach((e=>{e.contentWindow&&e.contentWindow.postMessage("slide:stop","*"),e.removeEventListener("load",this.startEmbeddedIframe)})),t(i,'iframe[src*="youtube.com/embed/"]').forEach((e=>{!e.hasAttribute("data-ignore")&&e.contentWindow&&"function"==typeof 
e.contentWindow.postMessage&&e.contentWindow.postMessage('{"event":"command","func":"pauseVideo","args":""}',"*")})),t(i,'iframe[src*="player.vimeo.com/"]').forEach((e=>{!e.hasAttribute("data-ignore")&&e.contentWindow&&"function"==typeof e.contentWindow.postMessage&&e.contentWindow.postMessage('{"method":"pause"}',"*")})),!0===n.unloadIframes&&t(i,"iframe[data-src]").forEach((e=>{e.setAttribute("src","about:blank"),e.removeAttribute("src")})))}}class y{constructor(e){this.Reveal=e}render(){this.element=document.createElement("div"),this.element.className="slide-number",this.Reveal.getRevealElement().appendChild(this.element)}configure(e,t){let i="none";e.slideNumber&&!this.Reveal.isPrintingPDF()&&("all"===e.showSlideNumber||"speaker"===e.showSlideNumber&&this.Reveal.isSpeakerNotes())&&(i="block"),this.element.style.display=i}update(){this.Reveal.getConfig().slideNumber&&this.element&&(this.element.innerHTML=this.getSlideNumber())}getSlideNumber(e=this.Reveal.getCurrentSlide()){let t,i=this.Reveal.getConfig(),n="h.v";if("function"==typeof i.slideNumber)t=i.slideNumber(e);else{"string"==typeof i.slideNumber&&(n=i.slideNumber),/c/.test(n)||1!==this.Reveal.getHorizontalSlides().length||(n="c");let s=e&&"uncounted"===e.dataset.visibility?0:1;switch(t=[],n){case"c":t.push(this.Reveal.getSlidePastCount(e)+s);break;case"c/t":t.push(this.Reveal.getSlidePastCount(e)+s,"/",this.Reveal.getTotalSlides());break;default:let i=this.Reveal.getIndices(e);t.push(i.h+s);let a="h/v"===n?"/":".";this.Reveal.isVerticalSlide(e)&&t.push(a,i.v+1)}}let s="#"+this.Reveal.location.getHash(e);return this.formatNumber(t[0],t[1],t[2],s)}formatNumber(e,t,i,n="#"+this.Reveal.location.getHash()){return"number"!=typeof i||isNaN(i)?`\n\t\t\t\t\t${e}\n\t\t\t\t\t`:`\n\t\t\t\t\t${e}\n\t\t\t\t\t${t}\n\t\t\t\t\t${i}\n\t\t\t\t\t`}destroy(){this.element.remove()}}class 
w{constructor(e){this.Reveal=e,this.onInput=this.onInput.bind(this),this.onBlur=this.onBlur.bind(this),this.onKeyDown=this.onKeyDown.bind(this)}render(){this.element=document.createElement("div"),this.element.className="jump-to-slide",this.jumpInput=document.createElement("input"),this.jumpInput.type="text",this.jumpInput.className="jump-to-slide-input",this.jumpInput.placeholder="Jump to slide",this.jumpInput.addEventListener("input",this.onInput),this.jumpInput.addEventListener("keydown",this.onKeyDown),this.jumpInput.addEventListener("blur",this.onBlur),this.element.appendChild(this.jumpInput)}show(){this.indicesOnShow=this.Reveal.getIndices(),this.Reveal.getRevealElement().appendChild(this.element),this.jumpInput.focus()}hide(){this.isVisible()&&(this.element.remove(),this.jumpInput.value="",clearTimeout(this.jumpTimeout),delete this.jumpTimeout)}isVisible(){return!!this.element.parentNode}jump(){clearTimeout(this.jumpTimeout),delete this.jumpTimeout;const e=this.jumpInput.value.trim("");let t=this.Reveal.location.getIndicesFromHash(e,{oneBasedIndex:!0});return!t&&/\S+/i.test(e)&&e.length>1&&(t=this.search(e)),t&&""!==e?(this.Reveal.slide(t.h,t.v,t.f),!0):(this.Reveal.slide(this.indicesOnShow.h,this.indicesOnShow.v,this.indicesOnShow.f),!1)}jumpAfter(e){clearTimeout(this.jumpTimeout),this.jumpTimeout=setTimeout((()=>this.jump()),e)}search(e){const t=new RegExp("\\b"+e.trim()+"\\b","i"),i=this.Reveal.getSlides().find((e=>t.test(e.innerText)));return 
i?this.Reveal.getIndices(i):null}cancel(){this.Reveal.slide(this.indicesOnShow.h,this.indicesOnShow.v,this.indicesOnShow.f),this.hide()}confirm(){this.jump(),this.hide()}destroy(){this.jumpInput.removeEventListener("input",this.onInput),this.jumpInput.removeEventListener("keydown",this.onKeyDown),this.jumpInput.removeEventListener("blur",this.onBlur),this.element.remove()}onKeyDown(e){13===e.keyCode?this.confirm():27===e.keyCode&&(this.cancel(),e.stopImmediatePropagation())}onInput(e){this.jumpAfter(200)}onBlur(){setTimeout((()=>this.hide()),1)}}const E=e=>{let t=e.match(/^#([0-9a-f]{3})$/i);if(t&&t[1])return t=t[1],{r:17*parseInt(t.charAt(0),16),g:17*parseInt(t.charAt(1),16),b:17*parseInt(t.charAt(2),16)};let i=e.match(/^#([0-9a-f]{6})$/i);if(i&&i[1])return i=i[1],{r:parseInt(i.slice(0,2),16),g:parseInt(i.slice(2,4),16),b:parseInt(i.slice(4,6),16)};let n=e.match(/^rgb\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$/i);if(n)return{r:parseInt(n[1],10),g:parseInt(n[2],10),b:parseInt(n[3],10)};let s=e.match(/^rgba\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\,\s*([\d]+|[\d]*.[\d]+)\s*\)$/i);return s?{r:parseInt(s[1],10),g:parseInt(s[2],10),b:parseInt(s[3],10),a:parseFloat(s[4])}:null};class R{constructor(e){this.Reveal=e}render(){this.element=document.createElement("div"),this.element.className="backgrounds",this.Reveal.getRevealElement().appendChild(this.element)}create(){this.element.innerHTML="",this.element.classList.add("no-transition"),this.Reveal.getHorizontalSlides().forEach((e=>{let 
i=this.createBackground(e,this.element);t(e,"section").forEach((e=>{this.createBackground(e,i),i.classList.add("stack")}))})),this.Reveal.getConfig().parallaxBackgroundImage?(this.element.style.backgroundImage='url("'+this.Reveal.getConfig().parallaxBackgroundImage+'")',this.element.style.backgroundSize=this.Reveal.getConfig().parallaxBackgroundSize,this.element.style.backgroundRepeat=this.Reveal.getConfig().parallaxBackgroundRepeat,this.element.style.backgroundPosition=this.Reveal.getConfig().parallaxBackgroundPosition,setTimeout((()=>{this.Reveal.getRevealElement().classList.add("has-parallax-background")}),1)):(this.element.style.backgroundImage="",this.Reveal.getRevealElement().classList.remove("has-parallax-background"))}createBackground(e,t){let i=document.createElement("div");i.className="slide-background "+e.className.replace(/present|past|future/,"");let n=document.createElement("div");return n.className="slide-background-content",i.appendChild(n),t.appendChild(i),e.slideBackgroundElement=i,e.slideBackgroundContentElement=n,this.sync(e),i}sync(e){const 
t=e.slideBackgroundElement,i=e.slideBackgroundContentElement,n={background:e.getAttribute("data-background"),backgroundSize:e.getAttribute("data-background-size"),backgroundImage:e.getAttribute("data-background-image"),backgroundVideo:e.getAttribute("data-background-video"),backgroundIframe:e.getAttribute("data-background-iframe"),backgroundColor:e.getAttribute("data-background-color"),backgroundGradient:e.getAttribute("data-background-gradient"),backgroundRepeat:e.getAttribute("data-background-repeat"),backgroundPosition:e.getAttribute("data-background-position"),backgroundTransition:e.getAttribute("data-background-transition"),backgroundOpacity:e.getAttribute("data-background-opacity")},s=e.hasAttribute("data-preload");e.classList.remove("has-dark-background"),e.classList.remove("has-light-background"),t.removeAttribute("data-loaded"),t.removeAttribute("data-background-hash"),t.removeAttribute("data-background-size"),t.removeAttribute("data-background-transition"),t.style.backgroundColor="",i.style.backgroundSize="",i.style.backgroundRepeat="",i.style.backgroundPosition="",i.style.backgroundImage="",i.style.opacity="",i.innerHTML="",n.background&&(/^(http|file|\/\/)/gi.test(n.background)||/\.(svg|png|jpg|jpeg|gif|bmp|webp)([?#\s]|$)/gi.test(n.background)?e.setAttribute("data-background-image",n.background):t.style.background=n.background),(n.background||n.backgroundColor||n.backgroundGradient||n.backgroundImage||n.backgroundVideo||n.backgroundIframe)&&t.setAttribute("data-background-hash",n.background+n.backgroundSize+n.backgroundImage+n.backgroundVideo+n.backgroundIframe+n.backgroundColor+n.backgroundGradient+n.backgroundRepeat+n.backgroundPosition+n.backgroundTransition+n.backgroundOpacity),n.backgroundSize&&t.setAttribute("data-background-size",n.backgroundSize),n.backgroundColor&&(t.style.backgroundColor=n.backgroundColor),n.backgroundGradient&&(t.style.backgroundImage=n.backgroundGradient),n.backgroundTransition&&t.setAttribute("data-background-transition",n.
backgroundTransition),s&&t.setAttribute("data-preload",""),n.backgroundSize&&(i.style.backgroundSize=n.backgroundSize),n.backgroundRepeat&&(i.style.backgroundRepeat=n.backgroundRepeat),n.backgroundPosition&&(i.style.backgroundPosition=n.backgroundPosition),n.backgroundOpacity&&(i.style.opacity=n.backgroundOpacity);let a=n.backgroundColor;if(!a||!E(a)){let e=window.getComputedStyle(t);e&&e.backgroundColor&&(a=e.backgroundColor)}if(a){const t=E(a);t&&0!==t.a&&("string"==typeof(r=a)&&(r=E(r)),(r?(299*r.r+587*r.g+114*r.b)/1e3:null)<128?e.classList.add("has-dark-background"):e.classList.add("has-light-background"))}var r}update(e=!1){let i=this.Reveal.getCurrentSlide(),n=this.Reveal.getIndices(),s=null,a=this.Reveal.getConfig().rtl?"future":"past",r=this.Reveal.getConfig().rtl?"past":"future";if(Array.from(this.element.childNodes).forEach(((i,o)=>{i.classList.remove("past","present","future"),on.h?i.classList.add(r):(i.classList.add("present"),s=i),(e||o===n.h)&&t(i,".slide-background").forEach(((e,t)=>{e.classList.remove("past","present","future"),tn.v?e.classList.add("future"):(e.classList.add("present"),o===n.h&&(s=e))}))})),this.previousBackground&&this.Reveal.slideContent.stopEmbeddedContent(this.previousBackground,{unloadIframes:!this.Reveal.slideContent.shouldPreload(this.previousBackground)}),s){this.Reveal.slideContent.startEmbeddedContent(s);let e=s.querySelector(".slide-background-content");if(e){let t=e.style.backgroundImage||"";/\.gif/i.test(t)&&(e.style.backgroundImage="",window.getComputedStyle(e).opacity,e.style.backgroundImage=t)}let 
t=this.previousBackground?this.previousBackground.getAttribute("data-background-hash"):null,i=s.getAttribute("data-background-hash");i&&i===t&&s!==this.previousBackground&&this.element.classList.add("no-transition"),this.previousBackground=s}i&&["has-light-background","has-dark-background"].forEach((e=>{i.classList.contains(e)?this.Reveal.getRevealElement().classList.add(e):this.Reveal.getRevealElement().classList.remove(e)}),this),setTimeout((()=>{this.element.classList.remove("no-transition")}),1)}updateParallax(){let e=this.Reveal.getIndices();if(this.Reveal.getConfig().parallaxBackgroundImage){let t,i,n=this.Reveal.getHorizontalSlides(),s=this.Reveal.getVerticalSlides(),a=this.element.style.backgroundSize.split(" ");1===a.length?t=i=parseInt(a[0],10):(t=parseInt(a[0],10),i=parseInt(a[1],10));let r,o,l=this.element.offsetWidth,d=n.length;r="number"==typeof this.Reveal.getConfig().parallaxBackgroundHorizontal?this.Reveal.getConfig().parallaxBackgroundHorizontal:d>1?(t-l)/(d-1):0,o=r*e.h*-1;let c,h,u=this.element.offsetHeight,g=s.length;c="number"==typeof this.Reveal.getConfig().parallaxBackgroundVertical?this.Reveal.getConfig().parallaxBackgroundVertical:(i-u)/(g-1),h=g>0?c*e.v:0,this.element.style.backgroundPosition=o+"px "+-h+"px"}}destroy(){this.element.remove()}}const S=".slides section",A=".slides>section",k=".slides>section.present>section",L=/registerPlugin|registerKeyboardShortcut|addKeyBinding|addEventListener|showPreview/,C=/fade-(down|up|right|left|out|in-then-out|in-then-semi-out)|semi-fade-out|current-visible|shrink|grow/;let x=0;class P{constructor(e){this.Reveal=e}run(e,t){this.reset();let i=this.Reveal.getSlides(),n=i.indexOf(t),s=i.indexOf(e);if(e.hasAttribute("data-auto-animate")&&t.hasAttribute("data-auto-animate")&&e.getAttribute("data-auto-animate-id")===t.getAttribute("data-auto-animate-id")&&!(n>s?t:e).hasAttribute("data-auto-animate-restart")){this.autoAnimateStyleSheet=this.autoAnimateStyleSheet||l();let 
i=this.getAutoAnimateOptions(t);e.dataset.autoAnimate="pending",t.dataset.autoAnimate="pending",i.slideDirection=n>s?"forward":"backward";let a="none"===e.style.display;a&&(e.style.display=this.Reveal.getConfig().display);let r=this.getAutoAnimatableElements(e,t).map((e=>this.autoAnimateElements(e.from,e.to,e.options||{},i,x++)));if(a&&(e.style.display="none"),"false"!==t.dataset.autoAnimateUnmatched&&!0===this.Reveal.getConfig().autoAnimateUnmatched){let e=.8*i.duration,n=.2*i.duration;this.getUnmatchedAutoAnimateElements(t).forEach((e=>{let t=this.getAutoAnimateOptions(e,i),n="unmatched";t.duration===i.duration&&t.delay===i.delay||(n="unmatched-"+x++,r.push(`[data-auto-animate="running"] [data-auto-animate-target="${n}"] { transition: opacity ${t.duration}s ease ${t.delay}s; }`)),e.dataset.autoAnimateTarget=n}),this),r.push(`[data-auto-animate="running"] [data-auto-animate-target="unmatched"] { transition: opacity ${e}s ease ${n}s; }`)}this.autoAnimateStyleSheet.innerHTML=r.join(""),requestAnimationFrame((()=>{this.autoAnimateStyleSheet&&(getComputedStyle(this.autoAnimateStyleSheet).fontWeight,t.dataset.autoAnimate="running")})),this.Reveal.dispatchEvent({type:"autoanimate",data:{fromSlide:e,toSlide:t,sheet:this.autoAnimateStyleSheet}})}}reset(){t(this.Reveal.getRevealElement(),'[data-auto-animate]:not([data-auto-animate=""])').forEach((e=>{e.dataset.autoAnimate=""})),t(this.Reveal.getRevealElement(),"[data-auto-animate-target]").forEach((e=>{delete e.dataset.autoAnimateTarget})),this.autoAnimateStyleSheet&&this.autoAnimateStyleSheet.parentNode&&(this.autoAnimateStyleSheet.parentNode.removeChild(this.autoAnimateStyleSheet),this.autoAnimateStyleSheet=null)}autoAnimateElements(e,t,i,n,s){e.dataset.autoAnimateTarget="",t.dataset.autoAnimateTarget=s;let a=this.getAutoAnimateOptions(t,n);void 0!==i.delay&&(a.delay=i.delay),void 0!==i.duration&&(a.duration=i.duration),void 0!==i.easing&&(a.easing=i.easing);let 
r=this.getAutoAnimatableProperties("from",e,i),o=this.getAutoAnimatableProperties("to",t,i);if(t.classList.contains("fragment")&&(delete o.styles.opacity,e.classList.contains("fragment"))){(e.className.match(C)||[""])[0]===(t.className.match(C)||[""])[0]&&"forward"===n.slideDirection&&t.classList.add("visible","disabled")}if(!1!==i.translate||!1!==i.scale){let e=this.Reveal.getScale(),t={x:(r.x-o.x)/e,y:(r.y-o.y)/e,scaleX:r.width/o.width,scaleY:r.height/o.height};t.x=Math.round(1e3*t.x)/1e3,t.y=Math.round(1e3*t.y)/1e3,t.scaleX=Math.round(1e3*t.scaleX)/1e3,t.scaleX=Math.round(1e3*t.scaleX)/1e3;let n=!1!==i.translate&&(0!==t.x||0!==t.y),s=!1!==i.scale&&(0!==t.scaleX||0!==t.scaleY);if(n||s){let e=[];n&&e.push(`translate(${t.x}px, ${t.y}px)`),s&&e.push(`scale(${t.scaleX}, ${t.scaleY})`),r.styles.transform=e.join(" "),r.styles["transform-origin"]="top left",o.styles.transform="none"}}for(let e in o.styles){const t=o.styles[e],i=r.styles[e];t===i?delete o.styles[e]:(!0===t.explicitValue&&(o.styles[e]=t.value),!0===i.explicitValue&&(r.styles[e]=i.value))}let l="",d=Object.keys(o.styles);if(d.length>0){r.styles.transition="none",o.styles.transition=`all ${a.duration}s ${a.easing} ${a.delay}s`,o.styles["transition-property"]=d.join(", "),o.styles["will-change"]=d.join(", "),l='[data-auto-animate-target="'+s+'"] {'+Object.keys(r.styles).map((e=>e+": "+r.styles[e]+" !important;")).join("")+'}[data-auto-animate="running"] [data-auto-animate-target="'+s+'"] {'+Object.keys(o.styles).map((e=>e+": "+o.styles[e]+" !important;")).join("")+"}"}return l}getAutoAnimateOptions(t,i){let n={easing:this.Reveal.getConfig().autoAnimateEasing,duration:this.Reveal.getConfig().autoAnimateDuration,delay:0};if(n=e(n,i),t.parentNode){let e=r(t.parentNode,"[data-auto-animate-target]");e&&(n=this.getAutoAnimateOptions(e,n))}return 
t.dataset.autoAnimateEasing&&(n.easing=t.dataset.autoAnimateEasing),t.dataset.autoAnimateDuration&&(n.duration=parseFloat(t.dataset.autoAnimateDuration)),t.dataset.autoAnimateDelay&&(n.delay=parseFloat(t.dataset.autoAnimateDelay)),n}getAutoAnimatableProperties(e,t,i){let n=this.Reveal.getConfig(),s={styles:[]};if(!1!==i.translate||!1!==i.scale){let e;if("function"==typeof i.measure)e=i.measure(t);else if(n.center)e=t.getBoundingClientRect();else{let i=this.Reveal.getScale();e={x:t.offsetLeft*i,y:t.offsetTop*i,width:t.offsetWidth*i,height:t.offsetHeight*i}}s.x=e.x,s.y=e.y,s.width=e.width,s.height=e.height}const a=getComputedStyle(t);return(i.styles||n.autoAnimateStyles).forEach((t=>{let i;"string"==typeof t&&(t={property:t}),void 0!==t.from&&"from"===e?i={value:t.from,explicitValue:!0}:void 0!==t.to&&"to"===e?i={value:t.to,explicitValue:!0}:("line-height"===t.property&&(i=parseFloat(a["line-height"])/parseFloat(a["font-size"])),isNaN(i)&&(i=a[t.property])),""!==i&&(s.styles[t.property]=i)})),s}getAutoAnimatableElements(e,t){let i=("function"==typeof this.Reveal.getConfig().autoAnimateMatcher?this.Reveal.getConfig().autoAnimateMatcher:this.getAutoAnimatePairs).call(this,e,t),n=[];return i.filter(((e,t)=>{if(-1===n.indexOf(e.to))return n.push(e.to),!0}))}getAutoAnimatePairs(e,t){let i=[];const n="h1, h2, h3, h4, h5, h6, p, li";return this.findAutoAnimateMatches(i,e,t,"[data-id]",(e=>e.nodeName+":::"+e.getAttribute("data-id"))),this.findAutoAnimateMatches(i,e,t,n,(e=>e.nodeName+":::"+e.innerText)),this.findAutoAnimateMatches(i,e,t,"img, video, iframe",(e=>e.nodeName+":::"+(e.getAttribute("src")||e.getAttribute("data-src")))),this.findAutoAnimateMatches(i,e,t,"pre",(e=>e.nodeName+":::"+e.innerText)),i.forEach((e=>{a(e.from,n)?e.options={scale:!1}:a(e.from,"pre")&&(e.options={scale:!1,styles:["width","height"]},this.findAutoAnimateMatches(i,e.from,e.to,".hljs 
.hljs-ln-code",(e=>e.textContent),{scale:!1,styles:[],measure:this.getLocalBoundingBox.bind(this)}),this.findAutoAnimateMatches(i,e.from,e.to,".hljs .hljs-ln-line[data-line-number]",(e=>e.getAttribute("data-line-number")),{scale:!1,styles:["width"],measure:this.getLocalBoundingBox.bind(this)}))}),this),i}getLocalBoundingBox(e){const t=this.Reveal.getScale();return{x:Math.round(e.offsetLeft*t*100)/100,y:Math.round(e.offsetTop*t*100)/100,width:Math.round(e.offsetWidth*t*100)/100,height:Math.round(e.offsetHeight*t*100)/100}}findAutoAnimateMatches(e,t,i,n,s,a){let r={},o={};[].slice.call(t.querySelectorAll(n)).forEach(((e,t)=>{const i=s(e);"string"==typeof i&&i.length&&(r[i]=r[i]||[],r[i].push(e))})),[].slice.call(i.querySelectorAll(n)).forEach(((t,i)=>{const n=s(t);let l;if(o[n]=o[n]||[],o[n].push(t),r[n]){const e=o[n].length-1,t=r[n].length-1;r[n][e]?(l=r[n][e],r[n][e]=null):r[n][t]&&(l=r[n][t],r[n][t]=null)}l&&e.push({from:l,to:t,options:a})}))}getUnmatchedAutoAnimateElements(e){return[].slice.call(e.children).reduce(((e,t)=>{const i=t.querySelector("[data-auto-animate-target]");return t.hasAttribute("data-auto-animate-target")||i||e.push(t),t.querySelector("[data-auto-animate-target]")&&(e=e.concat(this.getUnmatchedAutoAnimateElements(t))),e}),[])}}class N{constructor(e){this.Reveal=e}configure(e,t){!1===e.fragments?this.disable():!1===t.fragments&&this.enable()}disable(){t(this.Reveal.getSlidesElement(),".fragment").forEach((e=>{e.classList.add("visible"),e.classList.remove("current-fragment")}))}enable(){t(this.Reveal.getSlidesElement(),".fragment").forEach((e=>{e.classList.remove("visible"),e.classList.remove("current-fragment")}))}availableRoutes(){let e=this.Reveal.getCurrentSlide();if(e&&this.Reveal.getConfig().fragments){let t=e.querySelectorAll(".fragment:not(.disabled)"),i=e.querySelectorAll(".fragment:not(.disabled):not(.visible)");return{prev:t.length-i.length>0,next:!!i.length}}return{prev:!1,next:!1}}sort(e,t=!1){e=Array.from(e);let 
i=[],n=[],s=[];e.forEach((e=>{if(e.hasAttribute("data-fragment-index")){let t=parseInt(e.getAttribute("data-fragment-index"),10);i[t]||(i[t]=[]),i[t].push(e)}else n.push([e])})),i=i.concat(n);let a=0;return i.forEach((e=>{e.forEach((e=>{s.push(e),e.setAttribute("data-fragment-index",a)})),a++})),!0===t?i:s}sortAll(){this.Reveal.getHorizontalSlides().forEach((e=>{let i=t(e,"section");i.forEach(((e,t)=>{this.sort(e.querySelectorAll(".fragment"))}),this),0===i.length&&this.sort(e.querySelectorAll(".fragment"))}))}update(e,t){let i={shown:[],hidden:[]},n=this.Reveal.getCurrentSlide();if(n&&this.Reveal.getConfig().fragments&&(t=t||this.sort(n.querySelectorAll(".fragment"))).length){let s=0;if("number"!=typeof e){let t=this.sort(n.querySelectorAll(".fragment.visible")).pop();t&&(e=parseInt(t.getAttribute("data-fragment-index")||0,10))}Array.from(t).forEach(((t,n)=>{if(t.hasAttribute("data-fragment-index")&&(n=parseInt(t.getAttribute("data-fragment-index"),10)),s=Math.max(s,n),n<=e){let s=t.classList.contains("visible");t.classList.add("visible"),t.classList.remove("current-fragment"),n===e&&(this.Reveal.announceStatus(this.Reveal.getStatusText(t)),t.classList.add("current-fragment"),this.Reveal.slideContent.startEmbeddedContent(t)),s||(i.shown.push(t),this.Reveal.dispatchEvent({target:t,type:"visible",bubbles:!1}))}else{let e=t.classList.contains("visible");t.classList.remove("visible"),t.classList.remove("current-fragment"),e&&(this.Reveal.slideContent.stopEmbeddedContent(t),i.hidden.push(t),this.Reveal.dispatchEvent({target:t,type:"hidden",bubbles:!1}))}})),e="number"==typeof e?e:-1,e=Math.max(Math.min(e,s),-1),n.setAttribute("data-fragment",e)}return i}sync(e=this.Reveal.getCurrentSlide()){return this.sort(e.querySelectorAll(".fragment"))}goto(e,t=0){let i=this.Reveal.getCurrentSlide();if(i&&this.Reveal.getConfig().fragments){let n=this.sort(i.querySelectorAll(".fragment:not(.disabled)"));if(n.length){if("number"!=typeof e){let 
t=this.sort(i.querySelectorAll(".fragment:not(.disabled).visible")).pop();e=t?parseInt(t.getAttribute("data-fragment-index")||0,10):-1}e+=t;let s=this.update(e,n);return s.hidden.length&&this.Reveal.dispatchEvent({type:"fragmenthidden",data:{fragment:s.hidden[0],fragments:s.hidden}}),s.shown.length&&this.Reveal.dispatchEvent({type:"fragmentshown",data:{fragment:s.shown[0],fragments:s.shown}}),this.Reveal.controls.update(),this.Reveal.progress.update(),this.Reveal.getConfig().fragmentInURL&&this.Reveal.location.writeURL(),!(!s.shown.length&&!s.hidden.length)}}return!1}next(){return this.goto(null,1)}prev(){return this.goto(null,-1)}}class M{constructor(e){this.Reveal=e,this.active=!1,this.onSlideClicked=this.onSlideClicked.bind(this)}activate(){if(this.Reveal.getConfig().overview&&!this.isActive()){this.active=!0,this.Reveal.getRevealElement().classList.add("overview"),this.Reveal.cancelAutoSlide(),this.Reveal.getSlidesElement().appendChild(this.Reveal.getBackgroundsElement()),t(this.Reveal.getRevealElement(),S).forEach((e=>{e.classList.contains("stack")||e.addEventListener("click",this.onSlideClicked,!0)}));const e=70,i=this.Reveal.getComputedSlideSize();this.overviewSlideWidth=i.width+e,this.overviewSlideHeight=i.height+e,this.Reveal.getConfig().rtl&&(this.overviewSlideWidth=-this.overviewSlideWidth),this.Reveal.updateSlidesVisibility(),this.layout(),this.update(),this.Reveal.layout();const n=this.Reveal.getIndices();this.Reveal.dispatchEvent({type:"overviewshown",data:{indexh:n.h,indexv:n.v,currentSlide:this.Reveal.getCurrentSlide()}})}}layout(){this.Reveal.getHorizontalSlides().forEach(((e,i)=>{e.setAttribute("data-index-h",i),s(e,"translate3d("+i*this.overviewSlideWidth+"px, 0, 0)"),e.classList.contains("stack")&&t(e,"section").forEach(((e,t)=>{e.setAttribute("data-index-h",i),e.setAttribute("data-index-v",t),s(e,"translate3d(0, "+t*this.overviewSlideHeight+"px, 
0)")}))})),Array.from(this.Reveal.getBackgroundsElement().childNodes).forEach(((e,i)=>{s(e,"translate3d("+i*this.overviewSlideWidth+"px, 0, 0)"),t(e,".slide-background").forEach(((e,t)=>{s(e,"translate3d(0, "+t*this.overviewSlideHeight+"px, 0)")}))}))}update(){const e=Math.min(window.innerWidth,window.innerHeight),t=Math.max(e/5,150)/e,i=this.Reveal.getIndices();this.Reveal.transformSlides({overview:["scale("+t+")","translateX("+-i.h*this.overviewSlideWidth+"px)","translateY("+-i.v*this.overviewSlideHeight+"px)"].join(" ")})}deactivate(){if(this.Reveal.getConfig().overview){this.active=!1,this.Reveal.getRevealElement().classList.remove("overview"),this.Reveal.getRevealElement().classList.add("overview-deactivating"),setTimeout((()=>{this.Reveal.getRevealElement().classList.remove("overview-deactivating")}),1),this.Reveal.getRevealElement().appendChild(this.Reveal.getBackgroundsElement()),t(this.Reveal.getRevealElement(),S).forEach((e=>{s(e,""),e.removeEventListener("click",this.onSlideClicked,!0)})),t(this.Reveal.getBackgroundsElement(),".slide-background").forEach((e=>{s(e,"")})),this.Reveal.transformSlides({overview:""});const e=this.Reveal.getIndices();this.Reveal.slide(e.h,e.v),this.Reveal.layout(),this.Reveal.cueAutoSlide(),this.Reveal.dispatchEvent({type:"overviewhidden",data:{indexh:e.h,indexv:e.v,currentSlide:this.Reveal.getCurrentSlide()}})}}toggle(e){"boolean"==typeof e?e?this.activate():this.deactivate():this.isActive()?this.deactivate():this.activate()}isActive(){return this.active}onSlideClicked(e){if(this.isActive()){e.preventDefault();let t=e.target;for(;t&&!t.nodeName.match(/section/gi);)t=t.parentNode;if(t&&!t.classList.contains("disabled")&&(this.deactivate(),t.nodeName.match(/section/gi))){let e=parseInt(t.getAttribute("data-index-h"),10),i=parseInt(t.getAttribute("data-index-v"),10);this.Reveal.slide(e,i)}}}}class 
I{constructor(e){this.Reveal=e,this.shortcuts={},this.bindings={},this.onDocumentKeyDown=this.onDocumentKeyDown.bind(this),this.onDocumentKeyPress=this.onDocumentKeyPress.bind(this)}configure(e,t){"linear"===e.navigationMode?(this.shortcuts["→ , ↓ , SPACE , N , L , J"]="Next slide",this.shortcuts["← , ↑ , P , H , K"]="Previous slide"):(this.shortcuts["N , SPACE"]="Next slide",this.shortcuts["P , Shift SPACE"]="Previous slide",this.shortcuts["← , H"]="Navigate left",this.shortcuts["→ , L"]="Navigate right",this.shortcuts["↑ , K"]="Navigate up",this.shortcuts["↓ , J"]="Navigate down"),this.shortcuts["Alt + ←/↑/→/↓"]="Navigate without fragments",this.shortcuts["Shift + ←/↑/→/↓"]="Jump to first/last slide",this.shortcuts["B , ."]="Pause",this.shortcuts.F="Fullscreen",this.shortcuts.G="Jump to slide",this.shortcuts["ESC, O"]="Slide overview"}bind(){document.addEventListener("keydown",this.onDocumentKeyDown,!1),document.addEventListener("keypress",this.onDocumentKeyPress,!1)}unbind(){document.removeEventListener("keydown",this.onDocumentKeyDown,!1),document.removeEventListener("keypress",this.onDocumentKeyPress,!1)}addKeyBinding(e,t){"object"==typeof e&&e.keyCode?this.bindings[e.keyCode]={callback:t,key:e.key,description:e.description}:this.bindings[e]={callback:t,key:null,description:null}}removeKeyBinding(e){delete this.bindings[e]}triggerKey(e){this.onDocumentKeyDown({keyCode:e})}registerKeyboardShortcut(e,t){this.shortcuts[e]=t}getShortcuts(){return this.shortcuts}getBindings(){return this.bindings}onDocumentKeyPress(e){e.shiftKey&&63===e.charCode&&this.Reveal.toggleHelp()}onDocumentKeyDown(e){let t=this.Reveal.getConfig();if("function"==typeof t.keyboardCondition&&!1===t.keyboardCondition(e))return!0;if("focused"===t.keyboardCondition&&!this.Reveal.isFocused())return!0;let i=e.keyCode,n=!this.Reveal.isAutoSliding();this.Reveal.onUserInput(e);let 
s=document.activeElement&&!0===document.activeElement.isContentEditable,a=document.activeElement&&document.activeElement.tagName&&/input|textarea/i.test(document.activeElement.tagName),r=document.activeElement&&document.activeElement.className&&/speaker-notes/i.test(document.activeElement.className),o=!(-1!==[32,37,38,39,40,78,80].indexOf(e.keyCode)&&e.shiftKey||e.altKey)&&(e.shiftKey||e.altKey||e.ctrlKey||e.metaKey);if(s||a||r||o)return;let l,d=[66,86,190,191];if("object"==typeof t.keyboard)for(l in t.keyboard)"togglePause"===t.keyboard[l]&&d.push(parseInt(l,10));if(this.Reveal.isPaused()&&-1===d.indexOf(i))return!1;let c="linear"===t.navigationMode||!this.Reveal.hasHorizontalSlides()||!this.Reveal.hasVerticalSlides(),h=!1;if("object"==typeof t.keyboard)for(l in t.keyboard)if(parseInt(l,10)===i){let i=t.keyboard[l];"function"==typeof i?i.apply(null,[e]):"string"==typeof i&&"function"==typeof this.Reveal[i]&&this.Reveal[i].call(),h=!0}if(!1===h)for(l in this.bindings)if(parseInt(l,10)===i){let t=this.bindings[l].callback;"function"==typeof t?t.apply(null,[e]):"string"==typeof t&&"function"==typeof this.Reveal[t]&&this.Reveal[t].call(),h=!0}!1===h&&(h=!0,80===i||33===i?this.Reveal.prev({skipFragments:e.altKey}):78===i||34===i?this.Reveal.next({skipFragments:e.altKey}):72===i||37===i?e.shiftKey?this.Reveal.slide(0):!this.Reveal.overview.isActive()&&c?this.Reveal.prev({skipFragments:e.altKey}):this.Reveal.left({skipFragments:e.altKey}):76===i||39===i?e.shiftKey?this.Reveal.slide(this.Reveal.getHorizontalSlides().length-1):!this.Reveal.overview.isActive()&&c?this.Reveal.next({skipFragments:e.altKey}):this.Reveal.right({skipFragments:e.altKey}):75===i||38===i?e.shiftKey?this.Reveal.slide(void 0,0):!this.Reveal.overview.isActive()&&c?this.Reveal.prev({skipFragments:e.altKey}):this.Reveal.up({skipFragments:e.altKey}):74===i||40===i?e.shiftKey?this.Reveal.slide(void 
0,Number.MAX_VALUE):!this.Reveal.overview.isActive()&&c?this.Reveal.next({skipFragments:e.altKey}):this.Reveal.down({skipFragments:e.altKey}):36===i?this.Reveal.slide(0):35===i?this.Reveal.slide(this.Reveal.getHorizontalSlides().length-1):32===i?(this.Reveal.overview.isActive()&&this.Reveal.overview.deactivate(),e.shiftKey?this.Reveal.prev({skipFragments:e.altKey}):this.Reveal.next({skipFragments:e.altKey})):58===i||59===i||66===i||86===i||190===i||191===i?this.Reveal.togglePause():70===i?(e=>{let t=(e=e||document.documentElement).requestFullscreen||e.webkitRequestFullscreen||e.webkitRequestFullScreen||e.mozRequestFullScreen||e.msRequestFullscreen;t&&t.apply(e)})(t.embedded?this.Reveal.getViewportElement():document.documentElement):65===i?t.autoSlideStoppable&&this.Reveal.toggleAutoSlide(n):71===i?t.jumpToSlide&&this.Reveal.toggleJumpToSlide():h=!1),h?e.preventDefault&&e.preventDefault():27!==i&&79!==i||(!1===this.Reveal.closeOverlay()&&this.Reveal.overview.toggle(),e.preventDefault&&e.preventDefault()),this.Reveal.cueAutoSlide()}}class D{constructor(e){var t,i,n;n=1e3,(i="MAX_REPLACE_STATE_FREQUENCY")in(t=this)?Object.defineProperty(t,i,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[i]=n,this.Reveal=e,this.writeURLTimeout=0,this.replaceStateTimestamp=0,this.onWindowHashChange=this.onWindowHashChange.bind(this)}bind(){window.addEventListener("hashchange",this.onWindowHashChange,!1)}unbind(){window.removeEventListener("hashchange",this.onWindowHashChange,!1)}getIndicesFromHash(e=window.location.hash,t={}){let i=e.replace(/^#\/?/,""),n=i.split("/");if(/^[0-9]*$/.test(n[0])||!i.length){const e=this.Reveal.getConfig();let i,s=e.hashOneBasedIndex||t.oneBasedIndex?1:0,a=parseInt(n[0],10)-s||0,r=parseInt(n[1],10)-s||0;return e.fragmentInURL&&(i=parseInt(n[2],10),isNaN(i)&&(i=void 0)),{h:a,v:r,f:i}}{let e,t;/\/[-\d]+$/g.test(i)&&(t=parseInt(i.split("/").pop(),10),t=isNaN(t)?void 
0:t,i=i.split("/").shift());try{e=document.getElementById(decodeURIComponent(i))}catch(e){}if(e)return{...this.Reveal.getIndices(e),f:t}}return null}readURL(){const e=this.Reveal.getIndices(),t=this.getIndicesFromHash();t?t.h===e.h&&t.v===e.v&&void 0===t.f||this.Reveal.slide(t.h,t.v,t.f):this.Reveal.slide(e.h||0,e.v||0)}writeURL(e){let t=this.Reveal.getConfig(),i=this.Reveal.getCurrentSlide();if(clearTimeout(this.writeURLTimeout),"number"==typeof e)this.writeURLTimeout=setTimeout(this.writeURL,e);else if(i){let e=this.getHash();t.history?window.location.hash=e:t.hash&&("/"===e?this.debouncedReplaceState(window.location.pathname+window.location.search):this.debouncedReplaceState("#"+e))}}replaceState(e){window.history.replaceState(null,null,e),this.replaceStateTimestamp=Date.now()}debouncedReplaceState(e){clearTimeout(this.replaceStateTimeout),Date.now()-this.replaceStateTimestamp>this.MAX_REPLACE_STATE_FREQUENCY?this.replaceState(e):this.replaceStateTimeout=setTimeout((()=>this.replaceState(e)),this.MAX_REPLACE_STATE_FREQUENCY)}getHash(e){let t="/",i=e||this.Reveal.getCurrentSlide(),n=i?i.getAttribute("id"):null;n&&(n=encodeURIComponent(n));let s=this.Reveal.getIndices(e);if(this.Reveal.getConfig().fragmentInURL||(s.f=void 0),"string"==typeof n&&n.length)t="/"+n,s.f>=0&&(t+="/"+s.f);else{let e=this.Reveal.getConfig().hashOneBasedIndex?1:0;(s.h>0||s.v>0||s.f>=0)&&(t+=s.h+e),(s.v>0||s.f>=0)&&(t+="/"+(s.v+e)),s.f>=0&&(t+="/"+s.f)}return t}onWindowHashChange(e){this.readURL()}}class T{constructor(e){this.Reveal=e,this.onNavigateLeftClicked=this.onNavigateLeftClicked.bind(this),this.onNavigateRightClicked=this.onNavigateRightClicked.bind(this),this.onNavigateUpClicked=this.onNavigateUpClicked.bind(this),this.onNavigateDownClicked=this.onNavigateDownClicked.bind(this),this.onNavigatePrevClicked=this.onNavigatePrevClicked.bind(this),this.onNavigateNextClicked=this.onNavigateNextClicked.bind(this)}render(){const 
e=this.Reveal.getConfig().rtl,i=this.Reveal.getRevealElement();this.element=document.createElement("aside"),this.element.className="controls",this.element.innerHTML=`\n\t\t\t\n\t\t\t\n\t\t\t`,this.Reveal.getRevealElement().appendChild(this.element),this.controlsLeft=t(i,".navigate-left"),this.controlsRight=t(i,".navigate-right"),this.controlsUp=t(i,".navigate-up"),this.controlsDown=t(i,".navigate-down"),this.controlsPrev=t(i,".navigate-prev"),this.controlsNext=t(i,".navigate-next"),this.controlsRightArrow=this.element.querySelector(".navigate-right"),this.controlsLeftArrow=this.element.querySelector(".navigate-left"),this.controlsDownArrow=this.element.querySelector(".navigate-down")}configure(e,t){this.element.style.display=e.controls?"block":"none",this.element.setAttribute("data-controls-layout",e.controlsLayout),this.element.setAttribute("data-controls-back-arrows",e.controlsBackArrows)}bind(){let e=["touchstart","click"];v&&(e=["touchstart"]),e.forEach((e=>{this.controlsLeft.forEach((t=>t.addEventListener(e,this.onNavigateLeftClicked,!1))),this.controlsRight.forEach((t=>t.addEventListener(e,this.onNavigateRightClicked,!1))),this.controlsUp.forEach((t=>t.addEventListener(e,this.onNavigateUpClicked,!1))),this.controlsDown.forEach((t=>t.addEventListener(e,this.onNavigateDownClicked,!1))),this.controlsPrev.forEach((t=>t.addEventListener(e,this.onNavigatePrevClicked,!1))),this.controlsNext.forEach((t=>t.addEventListener(e,this.onNavigateNextClicked,!1)))}))}unbind(){["touchstart","click"].forEach((e=>{this.controlsLeft.forEach((t=>t.removeEventListener(e,this.onNavigateLeftClicked,!1))),this.controlsRight.forEach((t=>t.removeEventListener(e,this.onNavigateRightClicked,!1))),this.controlsUp.forEach((t=>t.removeEventListener(e,this.onNavigateUpClicked,!1))),this.controlsDown.forEach((t=>t.removeEventListener(e,this.onNavigateDownClicked,!1))),this.controlsPrev.forEach((t=>t.removeEventListener(e,this.onNavigatePrevClicked,!1))),this.controlsNext.forEach((t=>t.removeEv
entListener(e,this.onNavigateNextClicked,!1)))}))}update(){let e=this.Reveal.availableRoutes();[...this.controlsLeft,...this.controlsRight,...this.controlsUp,...this.controlsDown,...this.controlsPrev,...this.controlsNext].forEach((e=>{e.classList.remove("enabled","fragmented"),e.setAttribute("disabled","disabled")})),e.left&&this.controlsLeft.forEach((e=>{e.classList.add("enabled"),e.removeAttribute("disabled")})),e.right&&this.controlsRight.forEach((e=>{e.classList.add("enabled"),e.removeAttribute("disabled")})),e.up&&this.controlsUp.forEach((e=>{e.classList.add("enabled"),e.removeAttribute("disabled")})),e.down&&this.controlsDown.forEach((e=>{e.classList.add("enabled"),e.removeAttribute("disabled")})),(e.left||e.up)&&this.controlsPrev.forEach((e=>{e.classList.add("enabled"),e.removeAttribute("disabled")})),(e.right||e.down)&&this.controlsNext.forEach((e=>{e.classList.add("enabled"),e.removeAttribute("disabled")}));let t=this.Reveal.getCurrentSlide();if(t){let e=this.Reveal.fragments.availableRoutes();e.prev&&this.controlsPrev.forEach((e=>{e.classList.add("fragmented","enabled"),e.removeAttribute("disabled")})),e.next&&this.controlsNext.forEach((e=>{e.classList.add("fragmented","enabled"),e.removeAttribute("disabled")})),this.Reveal.isVerticalSlide(t)?(e.prev&&this.controlsUp.forEach((e=>{e.classList.add("fragmented","enabled"),e.removeAttribute("disabled")})),e.next&&this.controlsDown.forEach((e=>{e.classList.add("fragmented","enabled"),e.removeAttribute("disabled")}))):(e.prev&&this.controlsLeft.forEach((e=>{e.classList.add("fragmented","enabled"),e.removeAttribute("disabled")})),e.next&&this.controlsRight.forEach((e=>{e.classList.add("fragmented","enabled"),e.removeAttribute("disabled")})))}if(this.Reveal.getConfig().controlsTutorial){let 
t=this.Reveal.getIndices();!this.Reveal.hasNavigatedVertically()&&e.down?this.controlsDownArrow.classList.add("highlight"):(this.controlsDownArrow.classList.remove("highlight"),this.Reveal.getConfig().rtl?!this.Reveal.hasNavigatedHorizontally()&&e.left&&0===t.v?this.controlsLeftArrow.classList.add("highlight"):this.controlsLeftArrow.classList.remove("highlight"):!this.Reveal.hasNavigatedHorizontally()&&e.right&&0===t.v?this.controlsRightArrow.classList.add("highlight"):this.controlsRightArrow.classList.remove("highlight"))}}destroy(){this.unbind(),this.element.remove()}onNavigateLeftClicked(e){e.preventDefault(),this.Reveal.onUserInput(),"linear"===this.Reveal.getConfig().navigationMode?this.Reveal.prev():this.Reveal.left()}onNavigateRightClicked(e){e.preventDefault(),this.Reveal.onUserInput(),"linear"===this.Reveal.getConfig().navigationMode?this.Reveal.next():this.Reveal.right()}onNavigateUpClicked(e){e.preventDefault(),this.Reveal.onUserInput(),this.Reveal.up()}onNavigateDownClicked(e){e.preventDefault(),this.Reveal.onUserInput(),this.Reveal.down()}onNavigatePrevClicked(e){e.preventDefault(),this.Reveal.onUserInput(),this.Reveal.prev()}onNavigateNextClicked(e){e.preventDefault(),this.Reveal.onUserInput(),this.Reveal.next()}}class F{constructor(e){this.Reveal=e,this.onProgressClicked=this.onProgressClicked.bind(this)}render(){this.element=document.createElement("div"),this.element.className="progress",this.Reveal.getRevealElement().appendChild(this.element),this.bar=document.createElement("span"),this.element.appendChild(this.bar)}configure(e,t){this.element.style.display=e.progress?"block":"none"}bind(){this.Reveal.getConfig().progress&&this.element&&this.element.addEventListener("click",this.onProgressClicked,!1)}unbind(){this.Reveal.getConfig().progress&&this.element&&this.element.removeEventListener("click",this.onProgressClicked,!1)}update(){if(this.Reveal.getConfig().progress&&this.bar){let 
e=this.Reveal.getProgress();this.Reveal.getTotalSlides()<2&&(e=0),this.bar.style.transform="scaleX("+e+")"}}getMaxWidth(){return this.Reveal.getRevealElement().offsetWidth}onProgressClicked(e){this.Reveal.onUserInput(e),e.preventDefault();let t=this.Reveal.getSlides(),i=t.length,n=Math.floor(e.clientX/this.getMaxWidth()*i);this.Reveal.getConfig().rtl&&(n=i-n);let s=this.Reveal.getIndices(t[n]);this.Reveal.slide(s.h,s.v)}destroy(){this.element.remove()}}class z{constructor(e){this.Reveal=e,this.lastMouseWheelStep=0,this.cursorHidden=!1,this.cursorInactiveTimeout=0,this.onDocumentCursorActive=this.onDocumentCursorActive.bind(this),this.onDocumentMouseScroll=this.onDocumentMouseScroll.bind(this)}configure(e,t){e.mouseWheel?(document.addEventListener("DOMMouseScroll",this.onDocumentMouseScroll,!1),document.addEventListener("mousewheel",this.onDocumentMouseScroll,!1)):(document.removeEventListener("DOMMouseScroll",this.onDocumentMouseScroll,!1),document.removeEventListener("mousewheel",this.onDocumentMouseScroll,!1)),e.hideInactiveCursor?(document.addEventListener("mousemove",this.onDocumentCursorActive,!1),document.addEventListener("mousedown",this.onDocumentCursorActive,!1)):(this.showCursor(),document.removeEventListener("mousemove",this.onDocumentCursorActive,!1),document.removeEventListener("mousedown",this.onDocumentCursorActive,!1))}showCursor(){this.cursorHidden&&(this.cursorHidden=!1,this.Reveal.getRevealElement().style.cursor="")}hideCursor(){!1===this.cursorHidden&&(this.cursorHidden=!0,this.Reveal.getRevealElement().style.cursor="none")}destroy(){this.showCursor(),document.removeEventListener("DOMMouseScroll",this.onDocumentMouseScroll,!1),document.removeEventListener("mousewheel",this.onDocumentMouseScroll,!1),document.removeEventListener("mousemove",this.onDocumentCursorActive,!1),document.removeEventListener("mousedown",this.onDocumentCursorActive,!1)}onDocumentCursorActive(e){this.showCursor(),clearTimeout(this.cursorInactiveTimeout),this.cursorInactiveTi
meout=setTimeout(this.hideCursor.bind(this),this.Reveal.getConfig().hideCursorTime)}onDocumentMouseScroll(e){if(Date.now()-this.lastMouseWheelStep>1e3){this.lastMouseWheelStep=Date.now();let t=e.detail||-e.wheelDelta;t>0?this.Reveal.next():t<0&&this.Reveal.prev()}}}const H=(e,t)=>{const i=document.createElement("script");i.type="text/javascript",i.async=!1,i.defer=!1,i.src=e,"function"==typeof t&&(i.onload=i.onreadystatechange=e=>{("load"===e.type||/loaded|complete/.test(i.readyState))&&(i.onload=i.onreadystatechange=i.onerror=null,t())},i.onerror=e=>{i.onload=i.onreadystatechange=i.onerror=null,t(new Error("Failed loading script: "+i.src+"\n"+e))});const n=document.querySelector("head");n.insertBefore(i,n.lastChild)};class B{constructor(e){this.Reveal=e,this.state="idle",this.registeredPlugins={},this.asyncDependencies=[]}load(e,t){return this.state="loading",e.forEach(this.registerPlugin.bind(this)),new Promise((e=>{let i=[],n=0;if(t.forEach((e=>{e.condition&&!e.condition()||(e.async?this.asyncDependencies.push(e):i.push(e))})),i.length){n=i.length;const t=t=>{t&&"function"==typeof t.callback&&t.callback(),0==--n&&this.initPlugins().then(e)};i.forEach((e=>{"string"==typeof e.id?(this.registerPlugin(e),t(e)):"string"==typeof e.src?H(e.src,(()=>t(e))):(console.warn("Unrecognized plugin format",e),t())}))}else this.initPlugins().then(e)}))}initPlugins(){return new Promise((e=>{let t=Object.values(this.registeredPlugins),i=t.length;if(0===i)this.loadAsync().then(e);else{let n,s=()=>{0==--i?this.loadAsync().then(e):n()},a=0;n=()=>{let e=t[a++];if("function"==typeof e.init){let t=e.init(this.Reveal);t&&"function"==typeof t.then?t.then(s):s()}else s()},n()}}))}loadAsync(){return this.state="loaded",this.asyncDependencies.length&&this.asyncDependencies.forEach((e=>{H(e.src,e.callback)})),Promise.resolve()}registerPlugin(e){2===arguments.length&&"string"==typeof arguments[0]?(e=arguments[1]).id=arguments[0]:"function"==typeof e&&(e=e());let t=e.id;"string"!=typeof 
t?console.warn("Unrecognized plugin format; can't find plugin.id",e):void 0===this.registeredPlugins[t]?(this.registeredPlugins[t]=e,"loaded"===this.state&&"function"==typeof e.init&&e.init(this.Reveal)):console.warn('reveal.js: "'+t+'" plugin has already been registered')}hasPlugin(e){return!!this.registeredPlugins[e]}getPlugin(e){return this.registeredPlugins[e]}getRegisteredPlugins(){return this.registeredPlugins}destroy(){Object.values(this.registeredPlugins).forEach((e=>{"function"==typeof e.destroy&&e.destroy()})),this.registeredPlugins={},this.asyncDependencies=[]}}class O{constructor(e){this.Reveal=e}async setupPDF(){const e=this.Reveal.getConfig(),i=t(this.Reveal.getRevealElement(),S),n=e.slideNumber&&/all|print/i.test(e.showSlideNumber),s=this.Reveal.getComputedSlideSize(window.innerWidth,window.innerHeight),a=Math.floor(s.width*(1+e.margin)),r=Math.floor(s.height*(1+e.margin)),o=s.width,d=s.height;await new Promise(requestAnimationFrame),l("@page{size:"+a+"px "+r+"px; margin: 0px;}"),l(".reveal section>img, .reveal section>video, .reveal section>iframe{max-width: "+o+"px; max-height:"+d+"px}"),document.documentElement.classList.add("print-pdf"),document.body.style.width=a+"px",document.body.style.height=r+"px";const c=document.querySelector(".reveal-viewport");let h;if(c){const e=window.getComputedStyle(c);e&&e.background&&(h=e.background)}await new Promise(requestAnimationFrame),this.Reveal.layoutSlideContents(o,d),await new Promise(requestAnimationFrame);const u=i.map((e=>e.scrollHeight)),g=[],v=i[0].parentNode;let p=1;i.forEach((function(i,s){if(!1===i.classList.contains("stack")){let l=(a-o)/2,c=(r-d)/2;const v=u[s];let m=Math.max(Math.ceil(v/r),1);m=Math.min(m,e.pdfMaxPagesPerSlide),(1===m&&e.center||i.classList.contains("center"))&&(c=Math.max((r-v)/2,0));const 
f=document.createElement("div");if(g.push(f),f.className="pdf-page",f.style.height=(r+e.pdfPageHeightOffset)*m+"px",h&&(f.style.background=h),f.appendChild(i),i.style.left=l+"px",i.style.top=c+"px",i.style.width=o+"px",this.Reveal.slideContent.layout(i),i.slideBackgroundElement&&f.insertBefore(i.slideBackgroundElement,i),e.showNotes){const t=this.Reveal.getSlideNotes(i);if(t){const i=8,n="string"==typeof e.showNotes?e.showNotes:"inline",s=document.createElement("div");s.classList.add("speaker-notes"),s.classList.add("speaker-notes-pdf"),s.setAttribute("data-layout",n),s.innerHTML=t,"separate-page"===n?g.push(s):(s.style.left=i+"px",s.style.bottom=i+"px",s.style.width=a-2*i+"px",f.appendChild(s))}}if(n){const e=document.createElement("div");e.classList.add("slide-number"),e.classList.add("slide-number-pdf"),e.innerHTML=p++,f.appendChild(e)}if(e.pdfSeparateFragments){const e=this.Reveal.fragments.sort(f.querySelectorAll(".fragment"),!0);let t;e.forEach((function(e,i){t&&t.forEach((function(e){e.classList.remove("current-fragment")})),e.forEach((function(e){e.classList.add("visible","current-fragment")}),this);const s=f.cloneNode(!0);if(n){const e=i+1;s.querySelector(".slide-number-pdf").innerHTML+="."+e}g.push(s),t=e}),this),e.forEach((function(e){e.forEach((function(e){e.classList.remove("visible","current-fragment")}))}))}else t(f,".fragment:not(.fade-out)").forEach((function(e){e.classList.add("visible")}))}}),this),await new Promise(requestAnimationFrame),g.forEach((e=>v.appendChild(e))),this.Reveal.slideContent.layout(this.Reveal.getSlidesElement()),this.Reveal.dispatchEvent({type:"pdf-ready"})}isPrintingPDF(){return/print-pdf/gi.test(window.location.search)}}class 
q{constructor(e){this.Reveal=e,this.touchStartX=0,this.touchStartY=0,this.touchStartCount=0,this.touchCaptured=!1,this.onPointerDown=this.onPointerDown.bind(this),this.onPointerMove=this.onPointerMove.bind(this),this.onPointerUp=this.onPointerUp.bind(this),this.onTouchStart=this.onTouchStart.bind(this),this.onTouchMove=this.onTouchMove.bind(this),this.onTouchEnd=this.onTouchEnd.bind(this)}bind(){let e=this.Reveal.getRevealElement();"onpointerdown"in window?(e.addEventListener("pointerdown",this.onPointerDown,!1),e.addEventListener("pointermove",this.onPointerMove,!1),e.addEventListener("pointerup",this.onPointerUp,!1)):window.navigator.msPointerEnabled?(e.addEventListener("MSPointerDown",this.onPointerDown,!1),e.addEventListener("MSPointerMove",this.onPointerMove,!1),e.addEventListener("MSPointerUp",this.onPointerUp,!1)):(e.addEventListener("touchstart",this.onTouchStart,!1),e.addEventListener("touchmove",this.onTouchMove,!1),e.addEventListener("touchend",this.onTouchEnd,!1))}unbind(){let e=this.Reveal.getRevealElement();e.removeEventListener("pointerdown",this.onPointerDown,!1),e.removeEventListener("pointermove",this.onPointerMove,!1),e.removeEventListener("pointerup",this.onPointerUp,!1),e.removeEventListener("MSPointerDown",this.onPointerDown,!1),e.removeEventListener("MSPointerMove",this.onPointerMove,!1),e.removeEventListener("MSPointerUp",this.onPointerUp,!1),e.removeEventListener("touchstart",this.onTouchStart,!1),e.removeEventListener("touchmove",this.onTouchMove,!1),e.removeEventListener("touchend",this.onTouchEnd,!1)}isSwipePrevented(e){if(a(e,"video, audio"))return!0;for(;e&&"function"==typeof e.hasAttribute;){if(e.hasAttribute("data-prevent-swipe"))return!0;e=e.parentNode}return!1}onTouchStart(e){if(this.isSwipePrevented(e.target))return!0;this.touchStartX=e.touches[0].clientX,this.touchStartY=e.touches[0].clientY,this.touchStartCount=e.touches.length}onTouchMove(e){if(this.isSwipePrevented(e.target))return!0;let 
t=this.Reveal.getConfig();if(this.touchCaptured)v&&e.preventDefault();else{this.Reveal.onUserInput(e);let i=e.touches[0].clientX,n=e.touches[0].clientY;if(1===e.touches.length&&2!==this.touchStartCount){let s=this.Reveal.availableRoutes({includeFragments:!0}),a=i-this.touchStartX,r=n-this.touchStartY;a>40&&Math.abs(a)>Math.abs(r)?(this.touchCaptured=!0,"linear"===t.navigationMode?t.rtl?this.Reveal.next():this.Reveal.prev():this.Reveal.left()):a<-40&&Math.abs(a)>Math.abs(r)?(this.touchCaptured=!0,"linear"===t.navigationMode?t.rtl?this.Reveal.prev():this.Reveal.next():this.Reveal.right()):r>40&&s.up?(this.touchCaptured=!0,"linear"===t.navigationMode?this.Reveal.prev():this.Reveal.up()):r<-40&&s.down&&(this.touchCaptured=!0,"linear"===t.navigationMode?this.Reveal.next():this.Reveal.down()),t.embedded?(this.touchCaptured||this.Reveal.isVerticalSlide())&&e.preventDefault():e.preventDefault()}}}onTouchEnd(e){this.touchCaptured=!1}onPointerDown(e){e.pointerType!==e.MSPOINTER_TYPE_TOUCH&&"touch"!==e.pointerType||(e.touches=[{clientX:e.clientX,clientY:e.clientY}],this.onTouchStart(e))}onPointerMove(e){e.pointerType!==e.MSPOINTER_TYPE_TOUCH&&"touch"!==e.pointerType||(e.touches=[{clientX:e.clientX,clientY:e.clientY}],this.onTouchMove(e))}onPointerUp(e){e.pointerType!==e.MSPOINTER_TYPE_TOUCH&&"touch"!==e.pointerType||(e.touches=[{clientX:e.clientX,clientY:e.clientY}],this.onTouchEnd(e))}}const U="focus",j="blur";class 
W{constructor(e){this.Reveal=e,this.onRevealPointerDown=this.onRevealPointerDown.bind(this),this.onDocumentPointerDown=this.onDocumentPointerDown.bind(this)}configure(e,t){e.embedded?this.blur():(this.focus(),this.unbind())}bind(){this.Reveal.getConfig().embedded&&this.Reveal.getRevealElement().addEventListener("pointerdown",this.onRevealPointerDown,!1)}unbind(){this.Reveal.getRevealElement().removeEventListener("pointerdown",this.onRevealPointerDown,!1),document.removeEventListener("pointerdown",this.onDocumentPointerDown,!1)}focus(){this.state!==U&&(this.Reveal.getRevealElement().classList.add("focused"),document.addEventListener("pointerdown",this.onDocumentPointerDown,!1)),this.state=U}blur(){this.state!==j&&(this.Reveal.getRevealElement().classList.remove("focused"),document.removeEventListener("pointerdown",this.onDocumentPointerDown,!1)),this.state=j}isFocused(){return this.state===U}destroy(){this.Reveal.getRevealElement().classList.remove("focused")}onRevealPointerDown(e){this.focus()}onDocumentPointerDown(e){let t=r(e.target,".reveal");t&&t===this.Reveal.getRevealElement()||this.blur()}}class K{constructor(e){this.Reveal=e}render(){this.element=document.createElement("div"),this.element.className="speaker-notes",this.element.setAttribute("data-prevent-swipe",""),this.element.setAttribute("tabindex","0"),this.Reveal.getRevealElement().appendChild(this.element)}configure(e,t){e.showNotes&&this.element.setAttribute("data-layout","string"==typeof e.showNotes?e.showNotes:"inline")}update(){this.Reveal.getConfig().showNotes&&this.element&&this.Reveal.getCurrentSlide()&&!this.Reveal.print.isPrintingPDF()&&(this.element.innerHTML=this.getSlideNotes()||'No notes on this slide.')}updateVisibility(){this.Reveal.getConfig().showNotes&&this.hasNotes()&&!this.Reveal.print.isPrintingPDF()?this.Reveal.getRevealElement().classList.add("show-notes"):this.Reveal.getRevealElement().classList.remove("show-notes")}hasNotes(){return 
this.Reveal.getSlidesElement().querySelectorAll("[data-notes], aside.notes").length>0}isSpeakerNotesWindow(){return!!window.location.search.match(/receiver/gi)}getSlideNotes(e=this.Reveal.getCurrentSlide()){if(e.hasAttribute("data-notes"))return e.getAttribute("data-notes");let t=e.querySelectorAll("aside.notes");return t?Array.from(t).map((e=>e.innerHTML)).join("\n"):null}destroy(){this.element.remove()}}class V{constructor(e,t){this.diameter=100,this.diameter2=this.diameter/2,this.thickness=6,this.playing=!1,this.progress=0,this.progressOffset=1,this.container=e,this.progressCheck=t,this.canvas=document.createElement("canvas"),this.canvas.className="playback",this.canvas.width=this.diameter,this.canvas.height=this.diameter,this.canvas.style.width=this.diameter2+"px",this.canvas.style.height=this.diameter2+"px",this.context=this.canvas.getContext("2d"),this.container.appendChild(this.canvas),this.render()}setPlaying(e){const t=this.playing;this.playing=e,!t&&this.playing?this.animate():this.render()}animate(){const e=this.progress;this.progress=this.progressCheck(),e>.8&&this.progress<.2&&(this.progressOffset=this.progress),this.render(),this.playing&&requestAnimationFrame(this.animate.bind(this))}render(){let e=this.playing?this.progress:0,t=this.diameter2-this.thickness,i=this.diameter2,n=this.diameter2,s=28;this.progressOffset+=.1*(1-this.progressOffset);const a=-Math.PI/2+e*(2*Math.PI),r=-Math.PI/2+this.progressOffset*(2*Math.PI);this.context.save(),this.context.clearRect(0,0,this.diameter,this.diameter),this.context.beginPath(),this.context.arc(i,n,t+4,0,2*Math.PI,!1),this.context.fillStyle="rgba( 0, 0, 0, 0.4 )",this.context.fill(),this.context.beginPath(),this.context.arc(i,n,t,0,2*Math.PI,!1),this.context.lineWidth=this.thickness,this.context.strokeStyle="rgba( 255, 255, 255, 0.2 
)",this.context.stroke(),this.playing&&(this.context.beginPath(),this.context.arc(i,n,t,r,a,!1),this.context.lineWidth=this.thickness,this.context.strokeStyle="#fff",this.context.stroke()),this.context.translate(i-14,n-14),this.playing?(this.context.fillStyle="#fff",this.context.fillRect(0,0,10,s),this.context.fillRect(18,0,10,s)):(this.context.beginPath(),this.context.translate(4,0),this.context.moveTo(0,0),this.context.lineTo(24,14),this.context.lineTo(0,s),this.context.fillStyle="#fff",this.context.fill()),this.context.restore()}on(e,t){this.canvas.addEventListener(e,t,!1)}off(e,t){this.canvas.removeEventListener(e,t,!1)}destroy(){this.playing=!1,this.canvas.parentNode&&this.container.removeChild(this.canvas)}}var $={width:960,height:700,margin:.04,minScale:.2,maxScale:2,controls:!0,controlsTutorial:!0,controlsLayout:"bottom-right",controlsBackArrows:"faded",progress:!0,slideNumber:!1,showSlideNumber:"all",hashOneBasedIndex:!1,hash:!1,respondToHashChanges:!0,jumpToSlide:!0,history:!1,keyboard:!0,keyboardCondition:null,disableLayout:!1,overview:!0,center:!0,touch:!0,loop:!1,rtl:!1,navigationMode:"default",shuffle:!1,fragments:!0,fragmentInURL:!0,embedded:!1,help:!0,pause:!0,showNotes:!1,showHiddenSlides:!1,autoPlayMedia:null,preloadIframes:null,autoAnimate:!0,autoAnimateMatcher:null,autoAnimateEasing:"ease",autoAnimateDuration:1,autoAnimateUnmatched:!0,autoAnimateStyles:["opacity","color","background-color","padding","font-size","line-height","letter-spacing","border-width","border-color","border-radius","outline","outline-offset"],autoSlide:0,autoSlideStoppable:!0,autoSlideMethod:null,defaultTiming:null,mouseWheel:!1,previewLinks:!1,postMessage:!0,postMessageEvents:!1,focusBodyOnPageVisibilityChange:!0,transition:"slide",transitionSpeed:"default",backgroundTransition:"fade",parallaxBackgroundImage:"",parallaxBackgroundSize:"",parallaxBackgroundRepeat:"",parallaxBackgroundPosition:"",parallaxBackgroundHorizontal:null,parallaxBackgroundVertical:null,pdfMaxPagesPerS
lide:Number.POSITIVE_INFINITY,pdfSeparateFragments:!0,pdfPageHeightOffset:-1,viewDistance:3,mobileViewDistance:2,display:"block",hideInactiveCursor:!0,hideCursorTime:5e3,sortFragmentsOnSync:!0,dependencies:[],plugins:[]};const X="4.5.0";function Y(a,l){arguments.length<2&&(l=arguments[0],a=document.querySelector(".reveal"));const h={};let u,v,p,m,f,E={},C=!1,x={hasNavigatedHorizontally:!1,hasNavigatedVertically:!1},H=[],U=1,j={layout:"",overview:""},Y={},_="idle",J=0,G=0,Q=-1,Z=!1,ee=new b(h),te=new y(h),ie=new w(h),ne=new P(h),se=new R(h),ae=new N(h),re=new M(h),oe=new I(h),le=new D(h),de=new T(h),ce=new F(h),he=new z(h),ue=new B(h),ge=new O(h),ve=new W(h),pe=new q(h),me=new K(h);function fe(e){if(!a)throw'Unable to find presentation root (
).';if(Y.wrapper=a,Y.slides=a.querySelector(".slides"),!Y.slides)throw'Unable to find slides container (
).';return E={...$,...E,...l,...e,...d()},be(),window.addEventListener("load",We,!1),ue.load(E.plugins,E.dependencies).then(ye),new Promise((e=>h.on("ready",e)))}function be(){!0===E.embedded?Y.viewport=r(a,".reveal-viewport")||a:(Y.viewport=document.body,document.documentElement.classList.add("reveal-full-page")),Y.viewport.classList.add("reveal-viewport")}function ye(){C=!0,we(),Ee(),Ce(),ke(),Le(),lt(),xe(),le.readURL(),se.update(!0),setTimeout((()=>{Y.slides.classList.remove("no-transition"),Y.wrapper.classList.add("ready"),Fe({type:"ready",data:{indexh:u,indexv:v,currentSlide:m}})}),1),ge.isPrintingPDF()&&(Ne(),"complete"===document.readyState?ge.setupPDF():window.addEventListener("load",(()=>{ge.setupPDF()})))}function we(){E.showHiddenSlides||t(Y.wrapper,'section[data-visibility="hidden"]').forEach((e=>{e.parentNode.removeChild(e)}))}function Ee(){Y.slides.classList.add("no-transition"),g?Y.wrapper.classList.add("no-hover"):Y.wrapper.classList.remove("no-hover"),se.render(),te.render(),ie.render(),de.render(),ce.render(),me.render(),Y.pauseOverlay=o(Y.wrapper,"div","pause-overlay",E.controls?'':null),Y.statusElement=Re(),Y.wrapper.setAttribute("role","application")}function Re(){let e=Y.wrapper.querySelector(".aria-status");return e||(e=document.createElement("div"),e.style.position="absolute",e.style.height="1px",e.style.width="1px",e.style.overflow="hidden",e.style.clip="rect( 1px, 1px, 1px, 1px )",e.classList.add("aria-status"),e.setAttribute("aria-live","polite"),e.setAttribute("aria-atomic","true"),Y.wrapper.appendChild(e)),e}function Se(e){Y.statusElement.textContent=e}function Ae(e){let t="";if(3===e.nodeType)t+=e.textContent;else if(1===e.nodeType){let i=e.getAttribute("aria-hidden"),n="none"===window.getComputedStyle(e).display;"true"===i||n||Array.from(e.childNodes).forEach((e=>{t+=Ae(e)}))}return t=t.trim(),""===t?"":t+" "}function 
ke(){setInterval((()=>{0===Y.wrapper.scrollTop&&0===Y.wrapper.scrollLeft||(Y.wrapper.scrollTop=0,Y.wrapper.scrollLeft=0)}),1e3)}function Le(){document.addEventListener("fullscreenchange",$t),document.addEventListener("webkitfullscreenchange",$t)}function Ce(){E.postMessage&&window.addEventListener("message",Ut,!1)}function xe(t){const n={...E};if("object"==typeof t&&e(E,t),!1===h.isReady())return;const s=Y.wrapper.querySelectorAll(S).length;Y.wrapper.classList.remove(n.transition),Y.wrapper.classList.add(E.transition),Y.wrapper.setAttribute("data-transition-speed",E.transitionSpeed),Y.wrapper.setAttribute("data-background-transition",E.backgroundTransition),Y.viewport.style.setProperty("--slide-width",E.width+"px"),Y.viewport.style.setProperty("--slide-height",E.height+"px"),E.shuffle&&dt(),i(Y.wrapper,"embedded",E.embedded),i(Y.wrapper,"rtl",E.rtl),i(Y.wrapper,"center",E.center),!1===E.pause&&Ze(),E.previewLinks?(He(),Be("[data-preview-link=false]")):(Be(),He("[data-preview-link]:not([data-preview-link=false])")),ne.reset(),f&&(f.destroy(),f=null),s>1&&E.autoSlide&&E.autoSlideStoppable&&(f=new V(Y.wrapper,(()=>Math.min(Math.max((Date.now()-Q)/J,0),1))),f.on("click",Yt),Z=!1),"default"!==E.navigationMode?Y.wrapper.setAttribute("data-navigation-mode",E.navigationMode):Y.wrapper.removeAttribute("data-navigation-mode"),me.configure(E,n),ve.configure(E,n),he.configure(E,n),de.configure(E,n),ce.configure(E,n),oe.configure(E,n),ae.configure(E,n),te.configure(E,n),rt()}function Pe(){window.addEventListener("resize",Kt,!1),E.touch&&pe.bind(),E.keyboard&&oe.bind(),E.progress&&ce.bind(),E.respondToHashChanges&&le.bind(),de.bind(),ve.bind(),Y.slides.addEventListener("click",Wt,!1),Y.slides.addEventListener("transitionend",jt,!1),Y.pauseOverlay.addEventListener("click",Ze,!1),E.focusBodyOnPageVisibilityChange&&document.addEventListener("visibilitychange",Vt,!1)}function 
Ne(){pe.unbind(),ve.unbind(),oe.unbind(),de.unbind(),ce.unbind(),le.unbind(),window.removeEventListener("resize",Kt,!1),Y.slides.removeEventListener("click",Wt,!1),Y.slides.removeEventListener("transitionend",jt,!1),Y.pauseOverlay.removeEventListener("click",Ze,!1)}function Me(){Ne(),Mt(),Be(),me.destroy(),ve.destroy(),ue.destroy(),he.destroy(),de.destroy(),ce.destroy(),se.destroy(),te.destroy(),ie.destroy(),document.removeEventListener("fullscreenchange",$t),document.removeEventListener("webkitfullscreenchange",$t),document.removeEventListener("visibilitychange",Vt,!1),window.removeEventListener("message",Ut,!1),window.removeEventListener("load",We,!1),Y.pauseOverlay&&Y.pauseOverlay.remove(),Y.statusElement&&Y.statusElement.remove(),document.documentElement.classList.remove("reveal-full-page"),Y.wrapper.classList.remove("ready","center","has-horizontal-slides","has-vertical-slides"),Y.wrapper.removeAttribute("data-transition-speed"),Y.wrapper.removeAttribute("data-background-transition"),Y.viewport.classList.remove("reveal-viewport"),Y.viewport.style.removeProperty("--slide-width"),Y.viewport.style.removeProperty("--slide-height"),Y.slides.style.removeProperty("width"),Y.slides.style.removeProperty("height"),Y.slides.style.removeProperty("zoom"),Y.slides.style.removeProperty("left"),Y.slides.style.removeProperty("top"),Y.slides.style.removeProperty("bottom"),Y.slides.style.removeProperty("right"),Y.slides.style.removeProperty("transform"),Array.from(Y.wrapper.querySelectorAll(S)).forEach((e=>{e.style.removeProperty("display"),e.style.removeProperty("top"),e.removeAttribute("hidden"),e.removeAttribute("aria-hidden")}))}function Ie(e,t,i){a.addEventListener(e,t,i)}function De(e,t,i){a.removeEventListener(e,t,i)}function Te(e){"string"==typeof e.layout&&(j.layout=e.layout),"string"==typeof e.overview&&(j.overview=e.overview),j.layout?s(Y.slides,j.layout+" "+j.overview):s(Y.slides,j.overview)}function Fe({target:t=Y.wrapper,type:i,data:n,bubbles:s=!0}){let 
a=document.createEvent("HTMLEvents",1,2);return a.initEvent(i,s,!0),e(a,n),t.dispatchEvent(a),t===Y.wrapper&&ze(i),a}function ze(t,i){if(E.postMessageEvents&&window.parent!==window.self){let n={namespace:"reveal",eventName:t,state:xt()};e(n,i),window.parent.postMessage(JSON.stringify(n),"*")}}function He(e="a"){Array.from(Y.wrapper.querySelectorAll(e)).forEach((e=>{/^(http|www)/gi.test(e.getAttribute("href"))&&e.addEventListener("click",Xt,!1)}))}function Be(e="a"){Array.from(Y.wrapper.querySelectorAll(e)).forEach((e=>{/^(http|www)/gi.test(e.getAttribute("href"))&&e.removeEventListener("click",Xt,!1)}))}function Oe(e){je(),Y.overlay=document.createElement("div"),Y.overlay.classList.add("overlay"),Y.overlay.classList.add("overlay-preview"),Y.wrapper.appendChild(Y.overlay),Y.overlay.innerHTML=`
\n\t\t\t\t\n\t\t\t\t\n\t\t\t
\n\t\t\t
\n\t\t\t
\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\tUnable to load iframe. This is likely due to the site's policy (x-frame-options).\n\t\t\t\t\n\t\t\t
`,Y.overlay.querySelector("iframe").addEventListener("load",(e=>{Y.overlay.classList.add("loaded")}),!1),Y.overlay.querySelector(".close").addEventListener("click",(e=>{je(),e.preventDefault()}),!1),Y.overlay.querySelector(".external").addEventListener("click",(e=>{je()}),!1)}function qe(e){"boolean"==typeof e?e?Ue():je():Y.overlay?je():Ue()}function Ue(){if(E.help){je(),Y.overlay=document.createElement("div"),Y.overlay.classList.add("overlay"),Y.overlay.classList.add("overlay-help"),Y.wrapper.appendChild(Y.overlay);let e='

Keyboard Shortcuts


',t=oe.getShortcuts(),i=oe.getBindings();e+="";for(let i in t)e+=``;for(let t in i)i[t].key&&i[t].description&&(e+=``);e+="
KEYACTION
${i}${t[i]}
${i[t].key}${i[t].description}
",Y.overlay.innerHTML=`\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
${e}
\n\t\t\t\t
\n\t\t\t`,Y.overlay.querySelector(".close").addEventListener("click",(e=>{je(),e.preventDefault()}),!1)}}function je(){return!!Y.overlay&&(Y.overlay.parentNode.removeChild(Y.overlay),Y.overlay=null,!0)}function We(){if(Y.wrapper&&!ge.isPrintingPDF()){if(!E.disableLayout){g&&!E.embedded&&document.documentElement.style.setProperty("--vh",.01*window.innerHeight+"px");const e=Ve(),t=U;Ke(E.width,E.height),Y.slides.style.width=e.width+"px",Y.slides.style.height=e.height+"px",U=Math.min(e.presentationWidth/e.width,e.presentationHeight/e.height),U=Math.max(U,E.minScale),U=Math.min(U,E.maxScale),1===U?(Y.slides.style.zoom="",Y.slides.style.left="",Y.slides.style.top="",Y.slides.style.bottom="",Y.slides.style.right="",Te({layout:""})):(Y.slides.style.zoom="",Y.slides.style.left="50%",Y.slides.style.top="50%",Y.slides.style.bottom="auto",Y.slides.style.right="auto",Te({layout:"translate(-50%, -50%) scale("+U+")"}));const i=Array.from(Y.wrapper.querySelectorAll(S));for(let t=0,n=i.length;t .stretch, section > .r-stretch").forEach((t=>{let n=c(t,i);if(/(img|video)/gi.test(t.nodeName)){const i=t.naturalWidth||t.videoWidth,s=t.naturalHeight||t.videoHeight,a=Math.min(e/i,n/s);t.style.width=i*a+"px",t.style.height=s*a+"px"}else t.style.width=e+"px",t.style.height=n+"px"}))}function Ve(e,t){let i=E.width,n=E.height;E.disableLayout&&(i=Y.slides.offsetWidth,n=Y.slides.offsetHeight);const s={width:i,height:n,presentationWidth:e||Y.wrapper.offsetWidth,presentationHeight:t||Y.wrapper.offsetHeight};return s.presentationWidth-=s.presentationWidth*E.margin,s.presentationHeight-=s.presentationHeight*E.margin,"string"==typeof s.width&&/%$/.test(s.width)&&(s.width=parseInt(s.width,10)/100*s.presentationWidth),"string"==typeof s.height&&/%$/.test(s.height)&&(s.height=parseInt(s.height,10)/100*s.presentationHeight),s}function $e(e,t){"object"==typeof e&&"function"==typeof e.setAttribute&&e.setAttribute("data-previous-indexv",t||0)}function Xe(e){if("object"==typeof e&&"function"==typeof 
e.setAttribute&&e.classList.contains("stack")){const t=e.hasAttribute("data-start-indexv")?"data-start-indexv":"data-previous-indexv";return parseInt(e.getAttribute(t)||0,10)}return 0}function Ye(e=m){return e&&e.parentNode&&!!e.parentNode.nodeName.match(/section/i)}function _e(){return!(!m||!Ye(m))&&!m.nextElementSibling}function Je(){return 0===u&&0===v}function Ge(){return!!m&&(!m.nextElementSibling&&(!Ye(m)||!m.parentNode.nextElementSibling))}function Qe(){if(E.pause){const e=Y.wrapper.classList.contains("paused");Mt(),Y.wrapper.classList.add("paused"),!1===e&&Fe({type:"paused"})}}function Ze(){const e=Y.wrapper.classList.contains("paused");Y.wrapper.classList.remove("paused"),Nt(),e&&Fe({type:"resumed"})}function et(e){"boolean"==typeof e?e?Qe():Ze():tt()?Ze():Qe()}function tt(){return Y.wrapper.classList.contains("paused")}function it(e){"boolean"==typeof e?e?ie.show():ie.hide():ie.isVisible()?ie.hide():ie.show()}function nt(e){"boolean"==typeof e?e?Dt():It():Z?Dt():It()}function st(){return!(!J||Z)}function at(e,t,i,n){if(Fe({type:"beforeslidechange",data:{indexh:void 0===e?u:e,indexv:void 0===t?v:t,origin:n}}).defaultPrevented)return;p=m;const s=Y.wrapper.querySelectorAll(A);if(0===s.length)return;void 0!==t||re.isActive()||(t=Xe(s[e])),p&&p.parentNode&&p.parentNode.classList.contains("stack")&&$e(p.parentNode,v);const a=H.concat();H.length=0;let r=u||0,o=v||0;u=ct(A,void 0===e?u:e),v=ct(k,void 0===t?v:t);let l=u!==r||v!==o;l||(p=null);let d=s[u],c=d.querySelectorAll("section");m=c[v]||d;let h=!1;l&&p&&m&&!re.isActive()&&(p.hasAttribute("data-auto-animate")&&m.hasAttribute("data-auto-animate")&&p.getAttribute("data-auto-animate-id")===m.getAttribute("data-auto-animate-id")&&!(u>r||v>o?m:p).hasAttribute("data-auto-animate-restart")&&(h=!0,Y.slides.classList.add("disable-slide-transitions")),_="running"),gt(),We(),re.isActive()&&re.update(),void 
0!==i&&ae.goto(i),p&&p!==m&&(p.classList.remove("present"),p.setAttribute("aria-hidden","true"),Je()&&setTimeout((()=>{Et().forEach((e=>{$e(e,0)}))}),0));e:for(let e=0,t=H.length;e{Se(Ae(m))})),ce.update(),de.update(),me.update(),se.update(),se.updateParallax(),te.update(),ae.update(),le.writeURL(),Nt(),h&&(setTimeout((()=>{Y.slides.classList.remove("disable-slide-transitions")}),0),E.autoAnimate&&ne.run(p,m))}function rt(){Ne(),Pe(),We(),J=E.autoSlide,Nt(),se.create(),le.writeURL(),!0===E.sortFragmentsOnSync&&ae.sortAll(),de.update(),ce.update(),gt(),me.update(),me.updateVisibility(),se.update(!0),te.update(),ee.formatEmbeddedContent(),!1===E.autoPlayMedia?ee.stopEmbeddedContent(m,{unloadIframes:!1}):ee.startEmbeddedContent(m),re.isActive()&&re.layout()}function ot(e=m){se.sync(e),ae.sync(e),ee.load(e),se.update(),me.update()}function lt(){yt().forEach((e=>{t(e,"section").forEach(((e,t)=>{t>0&&(e.classList.remove("present"),e.classList.remove("past"),e.classList.add("future"),e.setAttribute("aria-hidden","true"))}))}))}function dt(e=yt()){e.forEach(((t,i)=>{let n=e[Math.floor(Math.random()*e.length)];n.parentNode===t.parentNode&&t.parentNode.insertBefore(t,n);let s=t.querySelectorAll("section");s.length&&dt(s)}))}function ct(e,i){let n=t(Y.wrapper,e),s=n.length,a=ge.isPrintingPDF(),r=!1,o=!1;if(s){E.loop&&(i>=s&&(r=!0),(i%=s)<0&&(i=s+i,o=!0)),i=Math.max(Math.min(i,s-1),0);for(let e=0;ei?(t.classList.add(s?"past":"future"),E.fragments&&ut(t)):e===i&&E.fragments&&(r?ut(t):o&&ht(t))}let e=n[i],t=e.classList.contains("present");e.classList.add("present"),e.removeAttribute("hidden"),e.removeAttribute("aria-hidden"),t||Fe({target:e,type:"visible",bubbles:!1});let l=e.getAttribute("data-state");l&&(H=H.concat(l.split(" ")))}else i=0;return i}function ht(e){t(e,".fragment").forEach((e=>{e.classList.add("visible"),e.classList.remove("current-fragment")}))}function ut(e){t(e,".fragment.visible").forEach((e=>{e.classList.remove("visible","current-fragment")}))}function 
gt(){let e,i,n=yt(),s=n.length;if(s&&void 0!==u){let a=re.isActive()?10:E.viewDistance;g&&(a=re.isActive()?6:E.mobileViewDistance),ge.isPrintingPDF()&&(a=Number.MAX_VALUE);for(let r=0;r0,right:u0,down:v1&&(n.left=!0,n.right=!0),i.length>1&&(n.up=!0,n.down=!0)),t.length>1&&"linear"===E.navigationMode&&(n.right=n.right||n.down,n.left=n.left||n.up),!0===e){let e=ae.availableRoutes();n.left=n.left||e.prev,n.up=n.up||e.prev,n.down=n.down||e.next,n.right=n.right||e.next}if(E.rtl){let e=n.left;n.left=n.right,n.right=e}return n}function pt(e=m){let t=yt(),i=0;e:for(let n=0;n0){let i=.9;t+=m.querySelectorAll(".fragment.visible").length/e.length*i}}return Math.min(t/(e-1),1)}function ft(e){let i,n=u,s=v;if(e){let i=Ye(e),a=i?e.parentNode:e,r=yt();n=Math.max(r.indexOf(a),0),s=void 0,i&&(s=Math.max(t(e.parentNode,"section").indexOf(e),0))}if(!e&&m){if(m.querySelectorAll(".fragment").length>0){let e=m.querySelector(".current-fragment");i=e&&e.hasAttribute("data-fragment-index")?parseInt(e.getAttribute("data-fragment-index"),10):m.querySelectorAll(".fragment.visible").length-1}}return{h:n,v:s,f:i}}function bt(){return t(Y.wrapper,S+':not(.stack):not([data-visibility="uncounted"])')}function yt(){return t(Y.wrapper,A)}function wt(){return t(Y.wrapper,".slides>section>section")}function Et(){return t(Y.wrapper,A+".stack")}function Rt(){return yt().length>1}function St(){return wt().length>1}function At(){return bt().map((e=>{let t={};for(let i=0;i{e.hasAttribute("data-autoplay")&&J&&1e3*e.duration/e.playbackRate>J&&(J=1e3*e.duration/e.playbackRate+1e3)}))),!J||Z||tt()||re.isActive()||Ge()&&!ae.availableRoutes().next&&!0!==E.loop||(G=setTimeout((()=>{"function"==typeof E.autoSlideMethod?E.autoSlideMethod():Ot(),Nt()}),J),Q=Date.now()),f&&f.setPlaying(-1!==G)}}function Mt(){clearTimeout(G),G=-1}function It(){J&&!Z&&(Z=!0,Fe({type:"autoslidepaused"}),clearTimeout(G),f&&f.setPlaying(!1))}function Dt(){J&&Z&&(Z=!1,Fe({type:"autoslideresumed"}),Nt())}function 
Tt({skipFragments:e=!1}={}){x.hasNavigatedHorizontally=!0,E.rtl?(re.isActive()||e||!1===ae.next())&&vt().left&&at(u+1,"grid"===E.navigationMode?v:void 0):(re.isActive()||e||!1===ae.prev())&&vt().left&&at(u-1,"grid"===E.navigationMode?v:void 0)}function Ft({skipFragments:e=!1}={}){x.hasNavigatedHorizontally=!0,E.rtl?(re.isActive()||e||!1===ae.prev())&&vt().right&&at(u-1,"grid"===E.navigationMode?v:void 0):(re.isActive()||e||!1===ae.next())&&vt().right&&at(u+1,"grid"===E.navigationMode?v:void 0)}function zt({skipFragments:e=!1}={}){(re.isActive()||e||!1===ae.prev())&&vt().up&&at(u,v-1)}function Ht({skipFragments:e=!1}={}){x.hasNavigatedVertically=!0,(re.isActive()||e||!1===ae.next())&&vt().down&&at(u,v+1)}function Bt({skipFragments:e=!1}={}){if(e||!1===ae.prev())if(vt().up)zt({skipFragments:e});else{let i;if(i=E.rtl?t(Y.wrapper,A+".future").pop():t(Y.wrapper,A+".past").pop(),i&&i.classList.contains("stack")){let e=i.querySelectorAll("section").length-1||void 0;at(u-1,e)}else Tt({skipFragments:e})}}function Ot({skipFragments:e=!1}={}){if(x.hasNavigatedHorizontally=!0,x.hasNavigatedVertically=!0,e||!1===ae.next()){let t=vt();t.down&&t.right&&E.loop&&_e()&&(t.down=!1),t.down?Ht({skipFragments:e}):E.rtl?Tt({skipFragments:e}):Ft({skipFragments:e})}}function qt(e){E.autoSlideStoppable&&It()}function Ut(e){let t=e.data;if("string"==typeof t&&"{"===t.charAt(0)&&"}"===t.charAt(t.length-1)&&(t=JSON.parse(t),t.method&&"function"==typeof h[t.method]))if(!1===L.test(t.method)){const e=h[t.method].apply(h,t.args);ze("callback",{method:t.method,result:e})}else console.warn('reveal.js: "'+t.method+'" is is blacklisted from the postMessage API')}function jt(e){"running"===_&&/section/gi.test(e.target.nodeName)&&(_="idle",Fe({type:"slidetransitionend",data:{indexh:u,indexv:v,previousSlide:p,currentSlide:m}}))}function Wt(e){const t=r(e.target,'a[href^="#"]');if(t){const i=t.getAttribute("href"),n=le.getIndicesFromHash(i);n&&(h.slide(n.h,n.v,n.f),e.preventDefault())}}function 
Kt(e){We()}function Vt(e){!1===document.hidden&&document.activeElement!==document.body&&("function"==typeof document.activeElement.blur&&document.activeElement.blur(),document.body.focus())}function $t(e){(document.fullscreenElement||document.webkitFullscreenElement)===Y.wrapper&&(e.stopImmediatePropagation(),setTimeout((()=>{h.layout(),h.focus.focus()}),1))}function Xt(e){if(e.currentTarget&&e.currentTarget.hasAttribute("href")){let t=e.currentTarget.getAttribute("href");t&&(Oe(t),e.preventDefault())}}function Yt(e){Ge()&&!1===E.loop?(at(0,0),Dt()):Z?Dt():It()}const _t={VERSION:X,initialize:fe,configure:xe,destroy:Me,sync:rt,syncSlide:ot,syncFragments:ae.sync.bind(ae),slide:at,left:Tt,right:Ft,up:zt,down:Ht,prev:Bt,next:Ot,navigateLeft:Tt,navigateRight:Ft,navigateUp:zt,navigateDown:Ht,navigatePrev:Bt,navigateNext:Ot,navigateFragment:ae.goto.bind(ae),prevFragment:ae.prev.bind(ae),nextFragment:ae.next.bind(ae),on:Ie,off:De,addEventListener:Ie,removeEventListener:De,layout:We,shuffle:dt,availableRoutes:vt,availableFragments:ae.availableRoutes.bind(ae),toggleHelp:qe,toggleOverview:re.toggle.bind(re),togglePause:et,toggleAutoSlide:nt,toggleJumpToSlide:it,isFirstSlide:Je,isLastSlide:Ge,isLastVerticalSlide:_e,isVerticalSlide:Ye,isPaused:tt,isAutoSliding:st,isSpeakerNotes:me.isSpeakerNotesWindow.bind(me),isOverview:re.isActive.bind(re),isFocused:ve.isFocused.bind(ve),isPrintingPDF:ge.isPrintingPDF.bind(ge),isReady:()=>C,loadSlide:ee.load.bind(ee),unloadSlide:ee.unload.bind(ee),showPreview:Oe,hidePreview:je,addEventListeners:Pe,removeEventListeners:Ne,dispatchEvent:Fe,getState:xt,setState:Pt,getProgress:mt,getIndices:ft,getSlidesAttributes:At,getSlidePastCount:pt,getTotalSlides:kt,getSlide:Lt,getPreviousSlide:()=>p,getCurrentSlide:()=>m,getSlideBackground:Ct,getSlideNotes:me.getSlideNotes.bind(me),getSlides:bt,getHorizontalSlides:yt,getVerticalSlides:wt,hasHorizontalSlides:Rt,hasVerticalSlides:St,hasNavigatedHorizontally:()=>x.hasNavigatedHorizontally,hasNavigatedVertically
:()=>x.hasNavigatedVertically,addKeyBinding:oe.addKeyBinding.bind(oe),removeKeyBinding:oe.removeKeyBinding.bind(oe),triggerKey:oe.triggerKey.bind(oe),registerKeyboardShortcut:oe.registerKeyboardShortcut.bind(oe),getComputedSlideSize:Ve,getScale:()=>U,getConfig:()=>E,getQueryHash:d,getSlidePath:le.getHash.bind(le),getRevealElement:()=>a,getSlidesElement:()=>Y.slides,getViewportElement:()=>Y.viewport,getBackgroundsElement:()=>se.element,registerPlugin:ue.registerPlugin.bind(ue),hasPlugin:ue.hasPlugin.bind(ue),getPlugin:ue.getPlugin.bind(ue),getPlugins:ue.getRegisteredPlugins.bind(ue)};return e(h,{..._t,announceStatus:Se,getStatusText:Ae,print:ge,focus:ve,progress:ce,controls:de,location:le,overview:re,fragments:ae,slideContent:ee,slideNumber:te,onUserInput:qt,closeOverlay:je,updateSlidesVisibility:gt,layoutSlideContents:Ke,transformSlides:Te,cueAutoSlide:Nt,cancelAutoSlide:Mt}),_t}let _=Y,J=[];_.initialize=e=>(Object.assign(_,new Y(document.querySelector(".reveal"),e)),J.map((e=>e(_))),_.initialize()),["configure","on","off","addEventListener","removeEventListener","registerPlugin"].forEach((e=>{_[e]=(...t)=>{J.push((i=>i[e].call(null,...t)))}})),_.isReady=()=>!1,_.VERSION=X;export default _; +//# sourceMappingURL=reveal.esm.js.map diff --git a/revealjs/dist/reveal.esm.js.map b/revealjs/dist/reveal.esm.js.map new file mode 100644 index 0000000..2866e41 --- /dev/null +++ b/revealjs/dist/reveal.esm.js.map @@ -0,0 +1 @@ 
+{"version":3,"file":"reveal.esm.js","sources":["../js/utils/util.js","../js/utils/device.js","../node_modules/fitty/dist/fitty.module.js","../js/controllers/slidecontent.js","../js/controllers/slidenumber.js","../js/controllers/jumptoslide.js","../js/utils/color.js","../js/controllers/backgrounds.js","../js/utils/constants.js","../js/controllers/autoanimate.js","../js/controllers/fragments.js","../js/controllers/overview.js","../js/controllers/keyboard.js","../js/controllers/location.js","../js/controllers/controls.js","../js/controllers/progress.js","../js/controllers/pointer.js","../js/utils/loader.js","../js/controllers/plugins.js","../js/controllers/print.js","../js/controllers/touch.js","../js/controllers/focus.js","../js/controllers/notes.js","../js/components/playback.js","../js/config.js","../js/reveal.js","../js/index.js"],"sourcesContent":["/**\n * Extend object a with the properties of object b.\n * If there's a conflict, object b takes precedence.\n *\n * @param {object} a\n * @param {object} b\n */\nexport const extend = ( a, b ) => {\n\n\tfor( let i in b ) {\n\t\ta[ i ] = b[ i ];\n\t}\n\n\treturn a;\n\n}\n\n/**\n * querySelectorAll but returns an Array.\n */\nexport const queryAll = ( el, selector ) => {\n\n\treturn Array.from( el.querySelectorAll( selector ) );\n\n}\n\n/**\n * classList.toggle() with cross browser support\n */\nexport const toggleClass = ( el, className, value ) => {\n\tif( value ) {\n\t\tel.classList.add( className );\n\t}\n\telse {\n\t\tel.classList.remove( className );\n\t}\n}\n\n/**\n * Utility for deserializing a value.\n *\n * @param {*} value\n * @return {*}\n */\nexport const deserialize = ( value ) => {\n\n\tif( typeof value === 'string' ) {\n\t\tif( value === 'null' ) return null;\n\t\telse if( value === 'true' ) return true;\n\t\telse if( value === 'false' ) return false;\n\t\telse if( value.match( /^-?[\\d\\.]+$/ ) ) return parseFloat( value );\n\t}\n\n\treturn value;\n\n}\n\n/**\n * Measures the distance in pixels 
between point a\n * and point b.\n *\n * @param {object} a point with x/y properties\n * @param {object} b point with x/y properties\n *\n * @return {number}\n */\nexport const distanceBetween = ( a, b ) => {\n\n\tlet dx = a.x - b.x,\n\t\tdy = a.y - b.y;\n\n\treturn Math.sqrt( dx*dx + dy*dy );\n\n}\n\n/**\n * Applies a CSS transform to the target element.\n *\n * @param {HTMLElement} element\n * @param {string} transform\n */\nexport const transformElement = ( element, transform ) => {\n\n\telement.style.transform = transform;\n\n}\n\n/**\n * Element.matches with IE support.\n *\n * @param {HTMLElement} target The element to match\n * @param {String} selector The CSS selector to match\n * the element against\n *\n * @return {Boolean}\n */\nexport const matches = ( target, selector ) => {\n\n\tlet matchesMethod = target.matches || target.matchesSelector || target.msMatchesSelector;\n\n\treturn !!( matchesMethod && matchesMethod.call( target, selector ) );\n\n}\n\n/**\n * Find the closest parent that matches the given\n * selector.\n *\n * @param {HTMLElement} target The child element\n * @param {String} selector The CSS selector to match\n * the parents against\n *\n * @return {HTMLElement} The matched parent or null\n * if no matching parent was found\n */\nexport const closest = ( target, selector ) => {\n\n\t// Native Element.closest\n\tif( typeof target.closest === 'function' ) {\n\t\treturn target.closest( selector );\n\t}\n\n\t// Polyfill\n\twhile( target ) {\n\t\tif( matches( target, selector ) ) {\n\t\t\treturn target;\n\t\t}\n\n\t\t// Keep searching\n\t\ttarget = target.parentNode;\n\t}\n\n\treturn null;\n\n}\n\n/**\n * Handling the fullscreen functionality via the fullscreen API\n *\n * @see http://fullscreen.spec.whatwg.org/\n * @see https://developer.mozilla.org/en-US/docs/DOM/Using_fullscreen_mode\n */\nexport const enterFullscreen = element => {\n\n\telement = element || document.documentElement;\n\n\t// Check which implementation is available\n\tlet 
requestMethod = element.requestFullscreen ||\n\t\t\t\t\t\telement.webkitRequestFullscreen ||\n\t\t\t\t\t\telement.webkitRequestFullScreen ||\n\t\t\t\t\t\telement.mozRequestFullScreen ||\n\t\t\t\t\t\telement.msRequestFullscreen;\n\n\tif( requestMethod ) {\n\t\trequestMethod.apply( element );\n\t}\n\n}\n\n/**\n * Creates an HTML element and returns a reference to it.\n * If the element already exists the existing instance will\n * be returned.\n *\n * @param {HTMLElement} container\n * @param {string} tagname\n * @param {string} classname\n * @param {string} innerHTML\n *\n * @return {HTMLElement}\n */\nexport const createSingletonNode = ( container, tagname, classname, innerHTML='' ) => {\n\n\t// Find all nodes matching the description\n\tlet nodes = container.querySelectorAll( '.' + classname );\n\n\t// Check all matches to find one which is a direct child of\n\t// the specified container\n\tfor( let i = 0; i < nodes.length; i++ ) {\n\t\tlet testNode = nodes[i];\n\t\tif( testNode.parentNode === container ) {\n\t\t\treturn testNode;\n\t\t}\n\t}\n\n\t// If no node was found, create it now\n\tlet node = document.createElement( tagname );\n\tnode.className = classname;\n\tnode.innerHTML = innerHTML;\n\tcontainer.appendChild( node );\n\n\treturn node;\n\n}\n\n/**\n * Injects the given CSS styles into the DOM.\n *\n * @param {string} value\n */\nexport const createStyleSheet = ( value ) => {\n\n\tlet tag = document.createElement( 'style' );\n\ttag.type = 'text/css';\n\n\tif( value && value.length > 0 ) {\n\t\tif( tag.styleSheet ) {\n\t\t\ttag.styleSheet.cssText = value;\n\t\t}\n\t\telse {\n\t\t\ttag.appendChild( document.createTextNode( value ) );\n\t\t}\n\t}\n\n\tdocument.head.appendChild( tag );\n\n\treturn tag;\n\n}\n\n/**\n * Returns a key:value hash of all query params.\n */\nexport const getQueryHash = () => {\n\n\tlet query = {};\n\n\tlocation.search.replace( /[A-Z0-9]+?=([\\w\\.%-]*)/gi, a => {\n\t\tquery[ a.split( '=' ).shift() ] = a.split( '=' ).pop();\n\t} 
);\n\n\t// Basic deserialization\n\tfor( let i in query ) {\n\t\tlet value = query[ i ];\n\n\t\tquery[ i ] = deserialize( unescape( value ) );\n\t}\n\n\t// Do not accept new dependencies via query config to avoid\n\t// the potential of malicious script injection\n\tif( typeof query['dependencies'] !== 'undefined' ) delete query['dependencies'];\n\n\treturn query;\n\n}\n\n/**\n * Returns the remaining height within the parent of the\n * target element.\n *\n * remaining height = [ configured parent height ] - [ current parent height ]\n *\n * @param {HTMLElement} element\n * @param {number} [height]\n */\nexport const getRemainingHeight = ( element, height = 0 ) => {\n\n\tif( element ) {\n\t\tlet newHeight, oldHeight = element.style.height;\n\n\t\t// Change the .stretch element height to 0 in order find the height of all\n\t\t// the other elements\n\t\telement.style.height = '0px';\n\n\t\t// In Overview mode, the parent (.slide) height is set of 700px.\n\t\t// Restore it temporarily to its natural height.\n\t\telement.parentNode.style.height = 'auto';\n\n\t\tnewHeight = height - element.parentNode.offsetHeight;\n\n\t\t// Restore the old height, just in case\n\t\telement.style.height = oldHeight + 'px';\n\n\t\t// Clear the parent (.slide) height. 
.removeProperty works in IE9+\n\t\telement.parentNode.style.removeProperty('height');\n\n\t\treturn newHeight;\n\t}\n\n\treturn height;\n\n}\n\nconst fileExtensionToMimeMap = {\n\t'mp4': 'video/mp4',\n\t'm4a': 'video/mp4',\n\t'ogv': 'video/ogg',\n\t'mpeg': 'video/mpeg',\n\t'webm': 'video/webm'\n}\n\n/**\n * Guess the MIME type for common file formats.\n */\nexport const getMimeTypeFromFile = ( filename='' ) => {\n\treturn fileExtensionToMimeMap[filename.split('.').pop()]\n}\n\n/**\n * Encodes a string for RFC3986-compliant URL format.\n * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURI#encoding_for_rfc3986\n *\n * @param {string} url\n */\nexport const encodeRFC3986URI = ( url='' ) => {\n\treturn encodeURI(url)\n\t .replace(/%5B/g, \"[\")\n\t .replace(/%5D/g, \"]\")\n\t .replace(\n\t\t/[!'()*]/g,\n\t\t(c) => `%${c.charCodeAt(0).toString(16).toUpperCase()}`\n\t );\n}","const UA = navigator.userAgent;\n\nexport const isMobile = /(iphone|ipod|ipad|android)/gi.test( UA ) ||\n\t\t\t\t\t\t( navigator.platform === 'MacIntel' && navigator.maxTouchPoints > 1 ); // iPadOS\n\nexport const isChrome = /chrome/i.test( UA ) && !/edge/i.test( UA );\n\nexport const isAndroid = /android/gi.test( UA );","/*\n * fitty v2.3.3 - Snugly resizes text to fit its parent container\n * Copyright (c) 2020 Rik Schennink (https://pqina.nl/)\n */\n'use strict';\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\n\nvar _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nexports.default = function (w) {\n\n // no window, early exit\n if (!w) return;\n\n // node list to array helper method\n var toArray = function toArray(nl) {\n return [].slice.call(nl);\n };\n\n // states\n var DrawState = {\n IDLE: 0,\n DIRTY_CONTENT: 1,\n 
DIRTY_LAYOUT: 2,\n DIRTY: 3\n };\n\n // all active fitty elements\n var fitties = [];\n\n // group all redraw calls till next frame, we cancel each frame request when a new one comes in. If no support for request animation frame, this is an empty function and supports for fitty stops.\n var redrawFrame = null;\n var requestRedraw = 'requestAnimationFrame' in w ? function () {\n w.cancelAnimationFrame(redrawFrame);\n redrawFrame = w.requestAnimationFrame(function () {\n return redraw(fitties.filter(function (f) {\n return f.dirty && f.active;\n }));\n });\n } : function () {};\n\n // sets all fitties to dirty so they are redrawn on the next redraw loop, then calls redraw\n var redrawAll = function redrawAll(type) {\n return function () {\n fitties.forEach(function (f) {\n return f.dirty = type;\n });\n requestRedraw();\n };\n };\n\n // redraws fitties so they nicely fit their parent container\n var redraw = function redraw(fitties) {\n\n // getting info from the DOM at this point should not trigger a reflow, let's gather as much intel as possible before triggering a reflow\n\n // check if styles of all fitties have been computed\n fitties.filter(function (f) {\n return !f.styleComputed;\n }).forEach(function (f) {\n f.styleComputed = computeStyle(f);\n });\n\n // restyle elements that require pre-styling, this triggers a reflow, please try to prevent by adding CSS rules (see docs)\n fitties.filter(shouldPreStyle).forEach(applyStyle);\n\n // we now determine which fitties should be redrawn\n var fittiesToRedraw = fitties.filter(shouldRedraw);\n\n // we calculate final styles for these fitties\n fittiesToRedraw.forEach(calculateStyles);\n\n // now we apply the calculated styles from our previous loop\n fittiesToRedraw.forEach(function (f) {\n applyStyle(f);\n markAsClean(f);\n });\n\n // now we dispatch events for all restyled fitties\n fittiesToRedraw.forEach(dispatchFitEvent);\n };\n\n var markAsClean = function markAsClean(f) {\n return f.dirty = DrawState.IDLE;\n 
};\n\n var calculateStyles = function calculateStyles(f) {\n\n // get available width from parent node\n f.availableWidth = f.element.parentNode.clientWidth;\n\n // the space our target element uses\n f.currentWidth = f.element.scrollWidth;\n\n // remember current font size\n f.previousFontSize = f.currentFontSize;\n\n // let's calculate the new font size\n f.currentFontSize = Math.min(Math.max(f.minSize, f.availableWidth / f.currentWidth * f.previousFontSize), f.maxSize);\n\n // if allows wrapping, only wrap when at minimum font size (otherwise would break container)\n f.whiteSpace = f.multiLine && f.currentFontSize === f.minSize ? 'normal' : 'nowrap';\n };\n\n // should always redraw if is not dirty layout, if is dirty layout, only redraw if size has changed\n var shouldRedraw = function shouldRedraw(f) {\n return f.dirty !== DrawState.DIRTY_LAYOUT || f.dirty === DrawState.DIRTY_LAYOUT && f.element.parentNode.clientWidth !== f.availableWidth;\n };\n\n // every fitty element is tested for invalid styles\n var computeStyle = function computeStyle(f) {\n\n // get style properties\n var style = w.getComputedStyle(f.element, null);\n\n // get current font size in pixels (if we already calculated it, use the calculated version)\n f.currentFontSize = parseFloat(style.getPropertyValue('font-size'));\n\n // get display type and wrap mode\n f.display = style.getPropertyValue('display');\n f.whiteSpace = style.getPropertyValue('white-space');\n };\n\n // determines if this fitty requires initial styling, can be prevented by applying correct styles through CSS\n var shouldPreStyle = function shouldPreStyle(f) {\n\n var preStyle = false;\n\n // if we already tested for prestyling we don't have to do it again\n if (f.preStyleTestCompleted) return false;\n\n // should have an inline style, if not, apply\n if (!/inline-/.test(f.display)) {\n preStyle = true;\n f.display = 'inline-block';\n }\n\n // to correctly calculate dimensions the element should have whiteSpace set to 
nowrap\n if (f.whiteSpace !== 'nowrap') {\n preStyle = true;\n f.whiteSpace = 'nowrap';\n }\n\n // we don't have to do this twice\n f.preStyleTestCompleted = true;\n\n return preStyle;\n };\n\n // apply styles to single fitty\n var applyStyle = function applyStyle(f) {\n f.element.style.whiteSpace = f.whiteSpace;\n f.element.style.display = f.display;\n f.element.style.fontSize = f.currentFontSize + 'px';\n };\n\n // dispatch a fit event on a fitty\n var dispatchFitEvent = function dispatchFitEvent(f) {\n f.element.dispatchEvent(new CustomEvent('fit', {\n detail: {\n oldValue: f.previousFontSize,\n newValue: f.currentFontSize,\n scaleFactor: f.currentFontSize / f.previousFontSize\n }\n }));\n };\n\n // fit method, marks the fitty as dirty and requests a redraw (this will also redraw any other fitty marked as dirty)\n var fit = function fit(f, type) {\n return function () {\n f.dirty = type;\n if (!f.active) return;\n requestRedraw();\n };\n };\n\n var init = function init(f) {\n\n // save some of the original CSS properties before we change them\n f.originalStyle = {\n whiteSpace: f.element.style.whiteSpace,\n display: f.element.style.display,\n fontSize: f.element.style.fontSize\n };\n\n // should we observe DOM mutations\n observeMutations(f);\n\n // this is a new fitty so we need to validate if it's styles are in order\n f.newbie = true;\n\n // because it's a new fitty it should also be dirty, we want it to redraw on the first loop\n f.dirty = true;\n\n // we want to be able to update this fitty\n fitties.push(f);\n };\n\n var destroy = function destroy(f) {\n return function () {\n\n // remove from fitties array\n fitties = fitties.filter(function (_) {\n return _.element !== f.element;\n });\n\n // stop observing DOM\n if (f.observeMutations) f.observer.disconnect();\n\n // reset the CSS properties we changes\n f.element.style.whiteSpace = f.originalStyle.whiteSpace;\n f.element.style.display = f.originalStyle.display;\n f.element.style.fontSize = 
f.originalStyle.fontSize;\n };\n };\n\n // add a new fitty, does not redraw said fitty\n var subscribe = function subscribe(f) {\n return function () {\n if (f.active) return;\n f.active = true;\n requestRedraw();\n };\n };\n\n // remove an existing fitty\n var unsubscribe = function unsubscribe(f) {\n return function () {\n return f.active = false;\n };\n };\n\n var observeMutations = function observeMutations(f) {\n\n // no observing?\n if (!f.observeMutations) return;\n\n // start observing mutations\n f.observer = new MutationObserver(fit(f, DrawState.DIRTY_CONTENT));\n\n // start observing\n f.observer.observe(f.element, f.observeMutations);\n };\n\n // default mutation observer settings\n var mutationObserverDefaultSetting = {\n subtree: true,\n childList: true,\n characterData: true\n };\n\n // default fitty options\n var defaultOptions = {\n minSize: 16,\n maxSize: 512,\n multiLine: true,\n observeMutations: 'MutationObserver' in w ? mutationObserverDefaultSetting : false\n };\n\n // array of elements in, fitty instances out\n function fittyCreate(elements, options) {\n\n // set options object\n var fittyOptions = _extends({}, defaultOptions, options);\n\n // create fitties\n var publicFitties = elements.map(function (element) {\n\n // create fitty instance\n var f = _extends({}, fittyOptions, {\n\n // internal options for this fitty\n element: element,\n active: true\n });\n\n // initialise this fitty\n init(f);\n\n // expose API\n return {\n element: element,\n fit: fit(f, DrawState.DIRTY),\n unfreeze: subscribe(f),\n freeze: unsubscribe(f),\n unsubscribe: destroy(f)\n };\n });\n\n // call redraw on newly initiated fitties\n requestRedraw();\n\n // expose fitties\n return publicFitties;\n }\n\n // fitty creation function\n function fitty(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {};\n\n\n // if target is a string\n return typeof target === 'string' ?\n\n // treat it as a querySelector\n fittyCreate(toArray(document.querySelectorAll(target)), options) :\n\n // create single fitty\n fittyCreate([target], options)[0];\n }\n\n // handles viewport changes, redraws all fitties, but only does so after a timeout\n var resizeDebounce = null;\n var onWindowResized = function onWindowResized() {\n w.clearTimeout(resizeDebounce);\n resizeDebounce = w.setTimeout(redrawAll(DrawState.DIRTY_LAYOUT), fitty.observeWindowDelay);\n };\n\n // define observe window property, so when we set it to true or false events are automatically added and removed\n var events = ['resize', 'orientationchange'];\n Object.defineProperty(fitty, 'observeWindow', {\n set: function set(enabled) {\n var method = (enabled ? 'add' : 'remove') + 'EventListener';\n events.forEach(function (e) {\n w[method](e, onWindowResized);\n });\n }\n });\n\n // fitty global properties (by setting observeWindow to true the events above get added)\n fitty.observeWindow = true;\n fitty.observeWindowDelay = 100;\n\n // public fit all method, will force redraw no matter what\n fitty.fitAll = redrawAll(DrawState.DIRTY);\n\n // export our fitty function, we don't want to keep it to our selves\n return fitty;\n}(typeof window === 'undefined' ? 
null : window);","import { extend, queryAll, closest, getMimeTypeFromFile, encodeRFC3986URI } from '../utils/util.js'\nimport { isMobile } from '../utils/device.js'\n\nimport fitty from 'fitty';\n\n/**\n * Handles loading, unloading and playback of slide\n * content such as images, videos and iframes.\n */\nexport default class SlideContent {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\tthis.startEmbeddedIframe = this.startEmbeddedIframe.bind( this );\n\n\t}\n\n\t/**\n\t * Should the given element be preloaded?\n\t * Decides based on local element attributes and global config.\n\t *\n\t * @param {HTMLElement} element\n\t */\n\tshouldPreload( element ) {\n\n\t\t// Prefer an explicit global preload setting\n\t\tlet preload = this.Reveal.getConfig().preloadIframes;\n\n\t\t// If no global setting is available, fall back on the element's\n\t\t// own preload setting\n\t\tif( typeof preload !== 'boolean' ) {\n\t\t\tpreload = element.hasAttribute( 'data-preload' );\n\t\t}\n\n\t\treturn preload;\n\t}\n\n\t/**\n\t * Called when the given slide is within the configured view\n\t * distance. 
Shows the slide element and loads any content\n\t * that is set to load lazily (data-src).\n\t *\n\t * @param {HTMLElement} slide Slide to show\n\t */\n\tload( slide, options = {} ) {\n\n\t\t// Show the slide element\n\t\tslide.style.display = this.Reveal.getConfig().display;\n\n\t\t// Media elements with data-src attributes\n\t\tqueryAll( slide, 'img[data-src], video[data-src], audio[data-src], iframe[data-src]' ).forEach( element => {\n\t\t\tif( element.tagName !== 'IFRAME' || this.shouldPreload( element ) ) {\n\t\t\t\telement.setAttribute( 'src', element.getAttribute( 'data-src' ) );\n\t\t\t\telement.setAttribute( 'data-lazy-loaded', '' );\n\t\t\t\telement.removeAttribute( 'data-src' );\n\t\t\t}\n\t\t} );\n\n\t\t// Media elements with children\n\t\tqueryAll( slide, 'video, audio' ).forEach( media => {\n\t\t\tlet sources = 0;\n\n\t\t\tqueryAll( media, 'source[data-src]' ).forEach( source => {\n\t\t\t\tsource.setAttribute( 'src', source.getAttribute( 'data-src' ) );\n\t\t\t\tsource.removeAttribute( 'data-src' );\n\t\t\t\tsource.setAttribute( 'data-lazy-loaded', '' );\n\t\t\t\tsources += 1;\n\t\t\t} );\n\n\t\t\t// Enable inline video playback in mobile Safari\n\t\t\tif( isMobile && media.tagName === 'VIDEO' ) {\n\t\t\t\tmedia.setAttribute( 'playsinline', '' );\n\t\t\t}\n\n\t\t\t// If we rewrote sources for this video/audio element, we need\n\t\t\t// to manually tell it to load from its new origin\n\t\t\tif( sources > 0 ) {\n\t\t\t\tmedia.load();\n\t\t\t}\n\t\t} );\n\n\n\t\t// Show the corresponding background element\n\t\tlet background = slide.slideBackgroundElement;\n\t\tif( background ) {\n\t\t\tbackground.style.display = 'block';\n\n\t\t\tlet backgroundContent = slide.slideBackgroundContentElement;\n\t\t\tlet backgroundIframe = slide.getAttribute( 'data-background-iframe' );\n\n\t\t\t// If the background contains media, load it\n\t\t\tif( background.hasAttribute( 'data-loaded' ) === false ) {\n\t\t\t\tbackground.setAttribute( 'data-loaded', 'true' 
);\n\n\t\t\t\tlet backgroundImage = slide.getAttribute( 'data-background-image' ),\n\t\t\t\t\tbackgroundVideo = slide.getAttribute( 'data-background-video' ),\n\t\t\t\t\tbackgroundVideoLoop = slide.hasAttribute( 'data-background-video-loop' ),\n\t\t\t\t\tbackgroundVideoMuted = slide.hasAttribute( 'data-background-video-muted' );\n\n\t\t\t\t// Images\n\t\t\t\tif( backgroundImage ) {\n\t\t\t\t\t// base64\n\t\t\t\t\tif( /^data:/.test( backgroundImage.trim() ) ) {\n\t\t\t\t\t\tbackgroundContent.style.backgroundImage = `url(${backgroundImage.trim()})`;\n\t\t\t\t\t}\n\t\t\t\t\t// URL(s)\n\t\t\t\t\telse {\n\t\t\t\t\t\tbackgroundContent.style.backgroundImage = backgroundImage.split( ',' ).map( background => {\n\t\t\t\t\t\t\t// Decode URL(s) that are already encoded first\n\t\t\t\t\t\t\tlet decoded = decodeURI(background.trim());\n\t\t\t\t\t\t\treturn `url(${encodeRFC3986URI(decoded)})`;\n\t\t\t\t\t\t}).join( ',' );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Videos\n\t\t\t\telse if ( backgroundVideo && !this.Reveal.isSpeakerNotes() ) {\n\t\t\t\t\tlet video = document.createElement( 'video' );\n\n\t\t\t\t\tif( backgroundVideoLoop ) {\n\t\t\t\t\t\tvideo.setAttribute( 'loop', '' );\n\t\t\t\t\t}\n\n\t\t\t\t\tif( backgroundVideoMuted ) {\n\t\t\t\t\t\tvideo.muted = true;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Enable inline playback in mobile Safari\n\t\t\t\t\t//\n\t\t\t\t\t// Mute is required for video to play when using\n\t\t\t\t\t// swipe gestures to navigate since they don't\n\t\t\t\t\t// count as direct user actions :'(\n\t\t\t\t\tif( isMobile ) {\n\t\t\t\t\t\tvideo.muted = true;\n\t\t\t\t\t\tvideo.setAttribute( 'playsinline', '' );\n\t\t\t\t\t}\n\n\t\t\t\t\t// Support comma separated lists of video sources\n\t\t\t\t\tbackgroundVideo.split( ',' ).forEach( source => {\n\t\t\t\t\t\tlet type = getMimeTypeFromFile( source );\n\t\t\t\t\t\tif( type ) {\n\t\t\t\t\t\t\tvideo.innerHTML += ``;\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tvideo.innerHTML += ``;\n\t\t\t\t\t\t}\n\t\t\t\t\t} 
);\n\n\t\t\t\t\tbackgroundContent.appendChild( video );\n\t\t\t\t}\n\t\t\t\t// Iframes\n\t\t\t\telse if( backgroundIframe && options.excludeIframes !== true ) {\n\t\t\t\t\tlet iframe = document.createElement( 'iframe' );\n\t\t\t\t\tiframe.setAttribute( 'allowfullscreen', '' );\n\t\t\t\t\tiframe.setAttribute( 'mozallowfullscreen', '' );\n\t\t\t\t\tiframe.setAttribute( 'webkitallowfullscreen', '' );\n\t\t\t\t\tiframe.setAttribute( 'allow', 'autoplay' );\n\n\t\t\t\t\tiframe.setAttribute( 'data-src', backgroundIframe );\n\n\t\t\t\t\tiframe.style.width = '100%';\n\t\t\t\t\tiframe.style.height = '100%';\n\t\t\t\t\tiframe.style.maxHeight = '100%';\n\t\t\t\t\tiframe.style.maxWidth = '100%';\n\n\t\t\t\t\tbackgroundContent.appendChild( iframe );\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Start loading preloadable iframes\n\t\t\tlet backgroundIframeElement = backgroundContent.querySelector( 'iframe[data-src]' );\n\t\t\tif( backgroundIframeElement ) {\n\n\t\t\t\t// Check if this iframe is eligible to be preloaded\n\t\t\t\tif( this.shouldPreload( background ) && !/autoplay=(1|true|yes)/gi.test( backgroundIframe ) ) {\n\t\t\t\t\tif( backgroundIframeElement.getAttribute( 'src' ) !== backgroundIframe ) {\n\t\t\t\t\t\tbackgroundIframeElement.setAttribute( 'src', backgroundIframe );\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t\tthis.layout( slide );\n\n\t}\n\n\t/**\n\t * Applies JS-dependent layout helpers for the scope.\n\t */\n\tlayout( scopeElement ) {\n\n\t\t// Autosize text with the r-fit-text class based on the\n\t\t// size of its container. This needs to happen after the\n\t\t// slide is visible in order to measure the text.\n\t\tArray.from( scopeElement.querySelectorAll( '.r-fit-text' ) ).forEach( element => {\n\t\t\tfitty( element, {\n\t\t\t\tminSize: 24,\n\t\t\t\tmaxSize: this.Reveal.getConfig().height * 0.8,\n\t\t\t\tobserveMutations: false,\n\t\t\t\tobserveWindow: false\n\t\t\t} );\n\t\t} );\n\n\t}\n\n\t/**\n\t * Unloads and hides the given slide. 
This is called when the\n\t * slide is moved outside of the configured view distance.\n\t *\n\t * @param {HTMLElement} slide\n\t */\n\tunload( slide ) {\n\n\t\t// Hide the slide element\n\t\tslide.style.display = 'none';\n\n\t\t// Hide the corresponding background element\n\t\tlet background = this.Reveal.getSlideBackground( slide );\n\t\tif( background ) {\n\t\t\tbackground.style.display = 'none';\n\n\t\t\t// Unload any background iframes\n\t\t\tqueryAll( background, 'iframe[src]' ).forEach( element => {\n\t\t\t\telement.removeAttribute( 'src' );\n\t\t\t} );\n\t\t}\n\n\t\t// Reset lazy-loaded media elements with src attributes\n\t\tqueryAll( slide, 'video[data-lazy-loaded][src], audio[data-lazy-loaded][src], iframe[data-lazy-loaded][src]' ).forEach( element => {\n\t\t\telement.setAttribute( 'data-src', element.getAttribute( 'src' ) );\n\t\t\telement.removeAttribute( 'src' );\n\t\t} );\n\n\t\t// Reset lazy-loaded media elements with children\n\t\tqueryAll( slide, 'video[data-lazy-loaded] source[src], audio source[src]' ).forEach( source => {\n\t\t\tsource.setAttribute( 'data-src', source.getAttribute( 'src' ) );\n\t\t\tsource.removeAttribute( 'src' );\n\t\t} );\n\n\t}\n\n\t/**\n\t * Enforces origin-specific format rules for embedded media.\n\t */\n\tformatEmbeddedContent() {\n\n\t\tlet _appendParamToIframeSource = ( sourceAttribute, sourceURL, param ) => {\n\t\t\tqueryAll( this.Reveal.getSlidesElement(), 'iframe['+ sourceAttribute +'*=\"'+ sourceURL +'\"]' ).forEach( el => {\n\t\t\t\tlet src = el.getAttribute( sourceAttribute );\n\t\t\t\tif( src && src.indexOf( param ) === -1 ) {\n\t\t\t\t\tel.setAttribute( sourceAttribute, src + ( !/\\?/.test( src ) ? '?' 
: '&' ) + param );\n\t\t\t\t}\n\t\t\t});\n\t\t};\n\n\t\t// YouTube frames must include \"?enablejsapi=1\"\n\t\t_appendParamToIframeSource( 'src', 'youtube.com/embed/', 'enablejsapi=1' );\n\t\t_appendParamToIframeSource( 'data-src', 'youtube.com/embed/', 'enablejsapi=1' );\n\n\t\t// Vimeo frames must include \"?api=1\"\n\t\t_appendParamToIframeSource( 'src', 'player.vimeo.com/', 'api=1' );\n\t\t_appendParamToIframeSource( 'data-src', 'player.vimeo.com/', 'api=1' );\n\n\t}\n\n\t/**\n\t * Start playback of any embedded content inside of\n\t * the given element.\n\t *\n\t * @param {HTMLElement} element\n\t */\n\tstartEmbeddedContent( element ) {\n\n\t\tif( element && !this.Reveal.isSpeakerNotes() ) {\n\n\t\t\t// Restart GIFs\n\t\t\tqueryAll( element, 'img[src$=\".gif\"]' ).forEach( el => {\n\t\t\t\t// Setting the same unchanged source like this was confirmed\n\t\t\t\t// to work in Chrome, FF & Safari\n\t\t\t\tel.setAttribute( 'src', el.getAttribute( 'src' ) );\n\t\t\t} );\n\n\t\t\t// HTML5 media elements\n\t\t\tqueryAll( element, 'video, audio' ).forEach( el => {\n\t\t\t\tif( closest( el, '.fragment' ) && !closest( el, '.fragment.visible' ) ) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\t// Prefer an explicit global autoplay setting\n\t\t\t\tlet autoplay = this.Reveal.getConfig().autoPlayMedia;\n\n\t\t\t\t// If no global setting is available, fall back on the element's\n\t\t\t\t// own autoplay setting\n\t\t\t\tif( typeof autoplay !== 'boolean' ) {\n\t\t\t\t\tautoplay = el.hasAttribute( 'data-autoplay' ) || !!closest( el, '.slide-background' );\n\t\t\t\t}\n\n\t\t\t\tif( autoplay && typeof el.play === 'function' ) {\n\n\t\t\t\t\t// If the media is ready, start playback\n\t\t\t\t\tif( el.readyState > 1 ) {\n\t\t\t\t\t\tthis.startEmbeddedMedia( { target: el } );\n\t\t\t\t\t}\n\t\t\t\t\t// Mobile devices never fire a loaded event so instead\n\t\t\t\t\t// of waiting, we initiate playback\n\t\t\t\t\telse if( isMobile ) {\n\t\t\t\t\t\tlet promise = el.play();\n\n\t\t\t\t\t\t// If 
autoplay does not work, ensure that the controls are visible so\n\t\t\t\t\t\t// that the viewer can start the media on their own\n\t\t\t\t\t\tif( promise && typeof promise.catch === 'function' && el.controls === false ) {\n\t\t\t\t\t\t\tpromise.catch( () => {\n\t\t\t\t\t\t\t\tel.controls = true;\n\n\t\t\t\t\t\t\t\t// Once the video does start playing, hide the controls again\n\t\t\t\t\t\t\t\tel.addEventListener( 'play', () => {\n\t\t\t\t\t\t\t\t\tel.controls = false;\n\t\t\t\t\t\t\t\t} );\n\t\t\t\t\t\t\t} );\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// If the media isn't loaded, wait before playing\n\t\t\t\t\telse {\n\t\t\t\t\t\tel.removeEventListener( 'loadeddata', this.startEmbeddedMedia ); // remove first to avoid dupes\n\t\t\t\t\t\tel.addEventListener( 'loadeddata', this.startEmbeddedMedia );\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t} );\n\n\t\t\t// Normal iframes\n\t\t\tqueryAll( element, 'iframe[src]' ).forEach( el => {\n\t\t\t\tif( closest( el, '.fragment' ) && !closest( el, '.fragment.visible' ) ) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tthis.startEmbeddedIframe( { target: el } );\n\t\t\t} );\n\n\t\t\t// Lazy loading iframes\n\t\t\tqueryAll( element, 'iframe[data-src]' ).forEach( el => {\n\t\t\t\tif( closest( el, '.fragment' ) && !closest( el, '.fragment.visible' ) ) {\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tif( el.getAttribute( 'src' ) !== el.getAttribute( 'data-src' ) ) {\n\t\t\t\t\tel.removeEventListener( 'load', this.startEmbeddedIframe ); // remove first to avoid dupes\n\t\t\t\t\tel.addEventListener( 'load', this.startEmbeddedIframe );\n\t\t\t\t\tel.setAttribute( 'src', el.getAttribute( 'data-src' ) );\n\t\t\t\t}\n\t\t\t} );\n\n\t\t}\n\n\t}\n\n\t/**\n\t * Starts playing an embedded video/audio element after\n\t * it has finished loading.\n\t *\n\t * @param {object} event\n\t */\n\tstartEmbeddedMedia( event ) {\n\n\t\tlet isAttachedToDOM = !!closest( event.target, 'html' ),\n\t\t\tisVisible \t\t= !!closest( event.target, '.present' );\n\n\t\tif( isAttachedToDOM 
&& isVisible ) {\n\t\t\tevent.target.currentTime = 0;\n\t\t\tevent.target.play();\n\t\t}\n\n\t\tevent.target.removeEventListener( 'loadeddata', this.startEmbeddedMedia );\n\n\t}\n\n\t/**\n\t * \"Starts\" the content of an embedded iframe using the\n\t * postMessage API.\n\t *\n\t * @param {object} event\n\t */\n\tstartEmbeddedIframe( event ) {\n\n\t\tlet iframe = event.target;\n\n\t\tif( iframe && iframe.contentWindow ) {\n\n\t\t\tlet isAttachedToDOM = !!closest( event.target, 'html' ),\n\t\t\t\tisVisible \t\t= !!closest( event.target, '.present' );\n\n\t\t\tif( isAttachedToDOM && isVisible ) {\n\n\t\t\t\t// Prefer an explicit global autoplay setting\n\t\t\t\tlet autoplay = this.Reveal.getConfig().autoPlayMedia;\n\n\t\t\t\t// If no global setting is available, fall back on the element's\n\t\t\t\t// own autoplay setting\n\t\t\t\tif( typeof autoplay !== 'boolean' ) {\n\t\t\t\t\tautoplay = iframe.hasAttribute( 'data-autoplay' ) || !!closest( iframe, '.slide-background' );\n\t\t\t\t}\n\n\t\t\t\t// YouTube postMessage API\n\t\t\t\tif( /youtube\\.com\\/embed\\//.test( iframe.getAttribute( 'src' ) ) && autoplay ) {\n\t\t\t\t\tiframe.contentWindow.postMessage( '{\"event\":\"command\",\"func\":\"playVideo\",\"args\":\"\"}', '*' );\n\t\t\t\t}\n\t\t\t\t// Vimeo postMessage API\n\t\t\t\telse if( /player\\.vimeo\\.com\\//.test( iframe.getAttribute( 'src' ) ) && autoplay ) {\n\t\t\t\t\tiframe.contentWindow.postMessage( '{\"method\":\"play\"}', '*' );\n\t\t\t\t}\n\t\t\t\t// Generic postMessage API\n\t\t\t\telse {\n\t\t\t\t\tiframe.contentWindow.postMessage( 'slide:start', '*' );\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\t/**\n\t * Stop playback of any embedded content inside of\n\t * the targeted slide.\n\t *\n\t * @param {HTMLElement} element\n\t */\n\tstopEmbeddedContent( element, options = {} ) {\n\n\t\toptions = extend( {\n\t\t\t// Defaults\n\t\t\tunloadIframes: true\n\t\t}, options );\n\n\t\tif( element && element.parentNode ) {\n\t\t\t// HTML5 media 
elements\n\t\t\tqueryAll( element, 'video, audio' ).forEach( el => {\n\t\t\t\tif( !el.hasAttribute( 'data-ignore' ) && typeof el.pause === 'function' ) {\n\t\t\t\t\tel.setAttribute('data-paused-by-reveal', '');\n\t\t\t\t\tel.pause();\n\t\t\t\t}\n\t\t\t} );\n\n\t\t\t// Generic postMessage API for non-lazy loaded iframes\n\t\t\tqueryAll( element, 'iframe' ).forEach( el => {\n\t\t\t\tif( el.contentWindow ) el.contentWindow.postMessage( 'slide:stop', '*' );\n\t\t\t\tel.removeEventListener( 'load', this.startEmbeddedIframe );\n\t\t\t});\n\n\t\t\t// YouTube postMessage API\n\t\t\tqueryAll( element, 'iframe[src*=\"youtube.com/embed/\"]' ).forEach( el => {\n\t\t\t\tif( !el.hasAttribute( 'data-ignore' ) && el.contentWindow && typeof el.contentWindow.postMessage === 'function' ) {\n\t\t\t\t\tel.contentWindow.postMessage( '{\"event\":\"command\",\"func\":\"pauseVideo\",\"args\":\"\"}', '*' );\n\t\t\t\t}\n\t\t\t});\n\n\t\t\t// Vimeo postMessage API\n\t\t\tqueryAll( element, 'iframe[src*=\"player.vimeo.com/\"]' ).forEach( el => {\n\t\t\t\tif( !el.hasAttribute( 'data-ignore' ) && el.contentWindow && typeof el.contentWindow.postMessage === 'function' ) {\n\t\t\t\t\tel.contentWindow.postMessage( '{\"method\":\"pause\"}', '*' );\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tif( options.unloadIframes === true ) {\n\t\t\t\t// Unload lazy-loaded iframes\n\t\t\t\tqueryAll( element, 'iframe[data-src]' ).forEach( el => {\n\t\t\t\t\t// Only removing the src doesn't actually unload the frame\n\t\t\t\t\t// in all browsers (Firefox) so we set it to blank first\n\t\t\t\t\tel.setAttribute( 'src', 'about:blank' );\n\t\t\t\t\tel.removeAttribute( 'src' );\n\t\t\t\t} );\n\t\t\t}\n\t\t}\n\n\t}\n\n}\n","/**\n * Handles the display of reveal.js' optional slide number.\n */\nexport default class SlideNumber {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t}\n\n\trender() {\n\n\t\tthis.element = document.createElement( 'div' );\n\t\tthis.element.className = 
'slide-number';\n\t\tthis.Reveal.getRevealElement().appendChild( this.element );\n\n\t}\n\n\t/**\n\t * Called when the reveal.js config is updated.\n\t */\n\tconfigure( config, oldConfig ) {\n\n\t\tlet slideNumberDisplay = 'none';\n\t\tif( config.slideNumber && !this.Reveal.isPrintingPDF() ) {\n\t\t\tif( config.showSlideNumber === 'all' ) {\n\t\t\t\tslideNumberDisplay = 'block';\n\t\t\t}\n\t\t\telse if( config.showSlideNumber === 'speaker' && this.Reveal.isSpeakerNotes() ) {\n\t\t\t\tslideNumberDisplay = 'block';\n\t\t\t}\n\t\t}\n\n\t\tthis.element.style.display = slideNumberDisplay;\n\n\t}\n\n\t/**\n\t * Updates the slide number to match the current slide.\n\t */\n\tupdate() {\n\n\t\t// Update slide number if enabled\n\t\tif( this.Reveal.getConfig().slideNumber && this.element ) {\n\t\t\tthis.element.innerHTML = this.getSlideNumber();\n\t\t}\n\n\t}\n\n\t/**\n\t * Returns the HTML string corresponding to the current slide\n\t * number, including formatting.\n\t */\n\tgetSlideNumber( slide = this.Reveal.getCurrentSlide() ) {\n\n\t\tlet config = this.Reveal.getConfig();\n\t\tlet value;\n\t\tlet format = 'h.v';\n\n\t\tif ( typeof config.slideNumber === 'function' ) {\n\t\t\tvalue = config.slideNumber( slide );\n\t\t} else {\n\t\t\t// Check if a custom number format is available\n\t\t\tif( typeof config.slideNumber === 'string' ) {\n\t\t\t\tformat = config.slideNumber;\n\t\t\t}\n\n\t\t\t// If there are ONLY vertical slides in this deck, always use\n\t\t\t// a flattened slide number\n\t\t\tif( !/c/.test( format ) && this.Reveal.getHorizontalSlides().length === 1 ) {\n\t\t\t\tformat = 'c';\n\t\t\t}\n\n\t\t\t// Offset the current slide number by 1 to make it 1-indexed\n\t\t\tlet horizontalOffset = slide && slide.dataset.visibility === 'uncounted' ? 
0 : 1;\n\n\t\t\tvalue = [];\n\t\t\tswitch( format ) {\n\t\t\t\tcase 'c':\n\t\t\t\t\tvalue.push( this.Reveal.getSlidePastCount( slide ) + horizontalOffset );\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'c/t':\n\t\t\t\t\tvalue.push( this.Reveal.getSlidePastCount( slide ) + horizontalOffset, '/', this.Reveal.getTotalSlides() );\n\t\t\t\t\tbreak;\n\t\t\t\tdefault:\n\t\t\t\t\tlet indices = this.Reveal.getIndices( slide );\n\t\t\t\t\tvalue.push( indices.h + horizontalOffset );\n\t\t\t\t\tlet sep = format === 'h/v' ? '/' : '.';\n\t\t\t\t\tif( this.Reveal.isVerticalSlide( slide ) ) value.push( sep, indices.v + 1 );\n\t\t\t}\n\t\t}\n\n\t\tlet url = '#' + this.Reveal.location.getHash( slide );\n\t\treturn this.formatNumber( value[0], value[1], value[2], url );\n\n\t}\n\n\t/**\n\t * Applies HTML formatting to a slide number before it's\n\t * written to the DOM.\n\t *\n\t * @param {number} a Current slide\n\t * @param {string} delimiter Character to separate slide numbers\n\t * @param {(number|*)} b Total slides\n\t * @param {HTMLElement} [url='#'+locationHash()] The url to link to\n\t * @return {string} HTML string fragment\n\t */\n\tformatNumber( a, delimiter, b, url = '#' + this.Reveal.location.getHash() ) {\n\n\t\tif( typeof b === 'number' && !isNaN( b ) ) {\n\t\t\treturn `\n\t\t\t\t\t${a}\n\t\t\t\t\t${delimiter}\n\t\t\t\t\t${b}\n\t\t\t\t\t`;\n\t\t}\n\t\telse {\n\t\t\treturn `\n\t\t\t\t\t${a}\n\t\t\t\t\t`;\n\t\t}\n\n\t}\n\n\tdestroy() {\n\n\t\tthis.element.remove();\n\n\t}\n\n}","/**\n * Makes it possible to jump to a slide by entering its\n * slide number or id.\n */\nexport default class JumpToSlide {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\tthis.onInput = this.onInput.bind( this );\n\t\tthis.onBlur = this.onBlur.bind( this );\n\t\tthis.onKeyDown = this.onKeyDown.bind( this );\n\n\t}\n\n\trender() {\n\n\t\tthis.element = document.createElement( 'div' );\n\t\tthis.element.className = 'jump-to-slide';\n\n this.jumpInput = document.createElement( 'input' );\n 
this.jumpInput.type = 'text';\n this.jumpInput.className = 'jump-to-slide-input';\n this.jumpInput.placeholder = 'Jump to slide';\n\t\tthis.jumpInput.addEventListener( 'input', this.onInput );\n\t\tthis.jumpInput.addEventListener( 'keydown', this.onKeyDown );\n\t\tthis.jumpInput.addEventListener( 'blur', this.onBlur );\n\n this.element.appendChild( this.jumpInput );\n\n\t}\n\n\tshow() {\n\n\t\tthis.indicesOnShow = this.Reveal.getIndices();\n\n\t\tthis.Reveal.getRevealElement().appendChild( this.element );\n\t\tthis.jumpInput.focus();\n\n\t}\n\n\thide() {\n\n\t\tif( this.isVisible() ) {\n\t\t\tthis.element.remove();\n\t\t\tthis.jumpInput.value = '';\n\n\t\t\tclearTimeout( this.jumpTimeout );\n\t\t\tdelete this.jumpTimeout;\n\t\t}\n\n\t}\n\n\tisVisible() {\n\n\t\treturn !!this.element.parentNode;\n\n\t}\n\n\t/**\n\t * Parses the current input and jumps to the given slide.\n\t */\n\tjump() {\n\n\t\tclearTimeout( this.jumpTimeout );\n\t\tdelete this.jumpTimeout;\n\n\t\tconst query = this.jumpInput.value.trim( '' );\n\t\tlet indices = this.Reveal.location.getIndicesFromHash( query, { oneBasedIndex: true } );\n\n\t\t// If no valid index was found and the input query is a\n\t\t// string, fall back on a simple search\n\t\tif( !indices && /\\S+/i.test( query ) && query.length > 1 ) {\n\t\t\tindices = this.search( query );\n\t\t}\n\n\t\tif( indices && query !== '' ) {\n\t\t\tthis.Reveal.slide( indices.h, indices.v, indices.f );\n\t\t\treturn true;\n\t\t}\n\t\telse {\n\t\t\tthis.Reveal.slide( this.indicesOnShow.h, this.indicesOnShow.v, this.indicesOnShow.f );\n\t\t\treturn false;\n\t\t}\n\n\t}\n\n\tjumpAfter( delay ) {\n\n\t\tclearTimeout( this.jumpTimeout );\n\t\tthis.jumpTimeout = setTimeout( () => this.jump(), delay );\n\n\t}\n\n\t/**\n\t * A lofi search that looks for the given query in all\n\t * of our slides and returns the first match.\n\t */\n\tsearch( query ) {\n\n\t\tconst regex = new RegExp( '\\\\b' + query.trim() + '\\\\b', 'i' );\n\n\t\tconst slide = 
this.Reveal.getSlides().find( ( slide ) => {\n\t\t\treturn regex.test( slide.innerText );\n\t\t} );\n\n\t\tif( slide ) {\n\t\t\treturn this.Reveal.getIndices( slide );\n\t\t}\n\t\telse {\n\t\t\treturn null;\n\t\t}\n\n\t}\n\n\t/**\n\t * Reverts back to the slide we were on when jump to slide was\n\t * invoked.\n\t */\n\tcancel() {\n\n\t\tthis.Reveal.slide( this.indicesOnShow.h, this.indicesOnShow.v, this.indicesOnShow.f );\n\t\tthis.hide();\n\n\t}\n\n\tconfirm() {\n\n\t\tthis.jump();\n\t\tthis.hide();\n\n\t}\n\n\tdestroy() {\n\n\t\tthis.jumpInput.removeEventListener( 'input', this.onInput );\n\t\tthis.jumpInput.removeEventListener( 'keydown', this.onKeyDown );\n\t\tthis.jumpInput.removeEventListener( 'blur', this.onBlur );\n\n\t\tthis.element.remove();\n\n\t}\n\n\tonKeyDown( event ) {\n\n\t\tif( event.keyCode === 13 ) {\n\t\t\tthis.confirm();\n\t\t}\n\t\telse if( event.keyCode === 27 ) {\n\t\t\tthis.cancel();\n\n\t\t\tevent.stopImmediatePropagation();\n\t\t}\n\n\t}\n\n\tonInput( event ) {\n\n\t\tthis.jumpAfter( 200 );\n\n\t}\n\n\tonBlur() {\n\n\t\tsetTimeout( () => this.hide(), 1 );\n\n\t}\n\n}","/**\n * Converts various color input formats to an {r:0,g:0,b:0} object.\n *\n * @param {string} color The string representation of a color\n * @example\n * colorToRgb('#000');\n * @example\n * colorToRgb('#000000');\n * @example\n * colorToRgb('rgb(0,0,0)');\n * @example\n * colorToRgb('rgba(0,0,0)');\n *\n * @return {{r: number, g: number, b: number, [a]: number}|null}\n */\nexport const colorToRgb = ( color ) => {\n\n\tlet hex3 = color.match( /^#([0-9a-f]{3})$/i );\n\tif( hex3 && hex3[1] ) {\n\t\thex3 = hex3[1];\n\t\treturn {\n\t\t\tr: parseInt( hex3.charAt( 0 ), 16 ) * 0x11,\n\t\t\tg: parseInt( hex3.charAt( 1 ), 16 ) * 0x11,\n\t\t\tb: parseInt( hex3.charAt( 2 ), 16 ) * 0x11\n\t\t};\n\t}\n\n\tlet hex6 = color.match( /^#([0-9a-f]{6})$/i );\n\tif( hex6 && hex6[1] ) {\n\t\thex6 = hex6[1];\n\t\treturn {\n\t\t\tr: parseInt( hex6.slice( 0, 2 ), 16 ),\n\t\t\tg: parseInt( 
hex6.slice( 2, 4 ), 16 ),\n\t\t\tb: parseInt( hex6.slice( 4, 6 ), 16 )\n\t\t};\n\t}\n\n\tlet rgb = color.match( /^rgb\\s*\\(\\s*(\\d+)\\s*,\\s*(\\d+)\\s*,\\s*(\\d+)\\s*\\)$/i );\n\tif( rgb ) {\n\t\treturn {\n\t\t\tr: parseInt( rgb[1], 10 ),\n\t\t\tg: parseInt( rgb[2], 10 ),\n\t\t\tb: parseInt( rgb[3], 10 )\n\t\t};\n\t}\n\n\tlet rgba = color.match( /^rgba\\s*\\(\\s*(\\d+)\\s*,\\s*(\\d+)\\s*,\\s*(\\d+)\\s*\\,\\s*([\\d]+|[\\d]*.[\\d]+)\\s*\\)$/i );\n\tif( rgba ) {\n\t\treturn {\n\t\t\tr: parseInt( rgba[1], 10 ),\n\t\t\tg: parseInt( rgba[2], 10 ),\n\t\t\tb: parseInt( rgba[3], 10 ),\n\t\t\ta: parseFloat( rgba[4] )\n\t\t};\n\t}\n\n\treturn null;\n\n}\n\n/**\n * Calculates brightness on a scale of 0-255.\n *\n * @param {string} color See colorToRgb for supported formats.\n * @see {@link colorToRgb}\n */\nexport const colorBrightness = ( color ) => {\n\n\tif( typeof color === 'string' ) color = colorToRgb( color );\n\n\tif( color ) {\n\t\treturn ( color.r * 299 + color.g * 587 + color.b * 114 ) / 1000;\n\t}\n\n\treturn null;\n\n}","import { queryAll } from '../utils/util.js'\nimport { colorToRgb, colorBrightness } from '../utils/color.js'\n\n/**\n * Creates and updates slide backgrounds.\n */\nexport default class Backgrounds {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t}\n\n\trender() {\n\n\t\tthis.element = document.createElement( 'div' );\n\t\tthis.element.className = 'backgrounds';\n\t\tthis.Reveal.getRevealElement().appendChild( this.element );\n\n\t}\n\n\t/**\n\t * Creates the slide background elements and appends them\n\t * to the background container. 
One element is created per\n\t * slide no matter if the given slide has visible background.\n\t */\n\tcreate() {\n\n\t\t// Clear prior backgrounds\n\t\tthis.element.innerHTML = '';\n\t\tthis.element.classList.add( 'no-transition' );\n\n\t\t// Iterate over all horizontal slides\n\t\tthis.Reveal.getHorizontalSlides().forEach( slideh => {\n\n\t\t\tlet backgroundStack = this.createBackground( slideh, this.element );\n\n\t\t\t// Iterate over all vertical slides\n\t\t\tqueryAll( slideh, 'section' ).forEach( slidev => {\n\n\t\t\t\tthis.createBackground( slidev, backgroundStack );\n\n\t\t\t\tbackgroundStack.classList.add( 'stack' );\n\n\t\t\t} );\n\n\t\t} );\n\n\t\t// Add parallax background if specified\n\t\tif( this.Reveal.getConfig().parallaxBackgroundImage ) {\n\n\t\t\tthis.element.style.backgroundImage = 'url(\"' + this.Reveal.getConfig().parallaxBackgroundImage + '\")';\n\t\t\tthis.element.style.backgroundSize = this.Reveal.getConfig().parallaxBackgroundSize;\n\t\t\tthis.element.style.backgroundRepeat = this.Reveal.getConfig().parallaxBackgroundRepeat;\n\t\t\tthis.element.style.backgroundPosition = this.Reveal.getConfig().parallaxBackgroundPosition;\n\n\t\t\t// Make sure the below properties are set on the element - these properties are\n\t\t\t// needed for proper transitions to be set on the element via CSS. 
To remove\n\t\t\t// annoying background slide-in effect when the presentation starts, apply\n\t\t\t// these properties after short time delay\n\t\t\tsetTimeout( () => {\n\t\t\t\tthis.Reveal.getRevealElement().classList.add( 'has-parallax-background' );\n\t\t\t}, 1 );\n\n\t\t}\n\t\telse {\n\n\t\t\tthis.element.style.backgroundImage = '';\n\t\t\tthis.Reveal.getRevealElement().classList.remove( 'has-parallax-background' );\n\n\t\t}\n\n\t}\n\n\t/**\n\t * Creates a background for the given slide.\n\t *\n\t * @param {HTMLElement} slide\n\t * @param {HTMLElement} container The element that the background\n\t * should be appended to\n\t * @return {HTMLElement} New background div\n\t */\n\tcreateBackground( slide, container ) {\n\n\t\t// Main slide background element\n\t\tlet element = document.createElement( 'div' );\n\t\telement.className = 'slide-background ' + slide.className.replace( /present|past|future/, '' );\n\n\t\t// Inner background element that wraps images/videos/iframes\n\t\tlet contentElement = document.createElement( 'div' );\n\t\tcontentElement.className = 'slide-background-content';\n\n\t\telement.appendChild( contentElement );\n\t\tcontainer.appendChild( element );\n\n\t\tslide.slideBackgroundElement = element;\n\t\tslide.slideBackgroundContentElement = contentElement;\n\n\t\t// Syncs the background to reflect all current background settings\n\t\tthis.sync( slide );\n\n\t\treturn element;\n\n\t}\n\n\t/**\n\t * Renders all of the visual properties of a slide background\n\t * based on the various background attributes.\n\t *\n\t * @param {HTMLElement} slide\n\t */\n\tsync( slide ) {\n\n\t\tconst element = slide.slideBackgroundElement,\n\t\t\tcontentElement = slide.slideBackgroundContentElement;\n\n\t\tconst data = {\n\t\t\tbackground: slide.getAttribute( 'data-background' ),\n\t\t\tbackgroundSize: slide.getAttribute( 'data-background-size' ),\n\t\t\tbackgroundImage: slide.getAttribute( 'data-background-image' ),\n\t\t\tbackgroundVideo: slide.getAttribute( 
'data-background-video' ),\n\t\t\tbackgroundIframe: slide.getAttribute( 'data-background-iframe' ),\n\t\t\tbackgroundColor: slide.getAttribute( 'data-background-color' ),\n\t\t\tbackgroundGradient: slide.getAttribute( 'data-background-gradient' ),\n\t\t\tbackgroundRepeat: slide.getAttribute( 'data-background-repeat' ),\n\t\t\tbackgroundPosition: slide.getAttribute( 'data-background-position' ),\n\t\t\tbackgroundTransition: slide.getAttribute( 'data-background-transition' ),\n\t\t\tbackgroundOpacity: slide.getAttribute( 'data-background-opacity' ),\n\t\t};\n\n\t\tconst dataPreload = slide.hasAttribute( 'data-preload' );\n\n\t\t// Reset the prior background state in case this is not the\n\t\t// initial sync\n\t\tslide.classList.remove( 'has-dark-background' );\n\t\tslide.classList.remove( 'has-light-background' );\n\n\t\telement.removeAttribute( 'data-loaded' );\n\t\telement.removeAttribute( 'data-background-hash' );\n\t\telement.removeAttribute( 'data-background-size' );\n\t\telement.removeAttribute( 'data-background-transition' );\n\t\telement.style.backgroundColor = '';\n\n\t\tcontentElement.style.backgroundSize = '';\n\t\tcontentElement.style.backgroundRepeat = '';\n\t\tcontentElement.style.backgroundPosition = '';\n\t\tcontentElement.style.backgroundImage = '';\n\t\tcontentElement.style.opacity = '';\n\t\tcontentElement.innerHTML = '';\n\n\t\tif( data.background ) {\n\t\t\t// Auto-wrap image urls in url(...)\n\t\t\tif( /^(http|file|\\/\\/)/gi.test( data.background ) || /\\.(svg|png|jpg|jpeg|gif|bmp|webp)([?#\\s]|$)/gi.test( data.background ) ) {\n\t\t\t\tslide.setAttribute( 'data-background-image', data.background );\n\t\t\t}\n\t\t\telse {\n\t\t\t\telement.style.background = data.background;\n\t\t\t}\n\t\t}\n\n\t\t// Create a hash for this combination of background settings.\n\t\t// This is used to determine when two slide backgrounds are\n\t\t// the same.\n\t\tif( data.background || data.backgroundColor || data.backgroundGradient || data.backgroundImage || 
data.backgroundVideo || data.backgroundIframe ) {\n\t\t\telement.setAttribute( 'data-background-hash', data.background +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundSize +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundImage +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundVideo +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundIframe +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundColor +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundGradient +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundRepeat +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundPosition +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundTransition +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdata.backgroundOpacity );\n\t\t}\n\n\t\t// Additional and optional background properties\n\t\tif( data.backgroundSize ) element.setAttribute( 'data-background-size', data.backgroundSize );\n\t\tif( data.backgroundColor ) element.style.backgroundColor = data.backgroundColor;\n\t\tif( data.backgroundGradient ) element.style.backgroundImage = data.backgroundGradient;\n\t\tif( data.backgroundTransition ) element.setAttribute( 'data-background-transition', data.backgroundTransition );\n\n\t\tif( dataPreload ) element.setAttribute( 'data-preload', '' );\n\n\t\t// Background image options are set on the content wrapper\n\t\tif( data.backgroundSize ) contentElement.style.backgroundSize = data.backgroundSize;\n\t\tif( data.backgroundRepeat ) contentElement.style.backgroundRepeat = data.backgroundRepeat;\n\t\tif( data.backgroundPosition ) contentElement.style.backgroundPosition = data.backgroundPosition;\n\t\tif( data.backgroundOpacity ) contentElement.style.opacity = data.backgroundOpacity;\n\n\t\t// If this slide has a background color, we add a class that\n\t\t// signals if it is light or dark. 
If the slide has no background\n\t\t// color, no class will be added\n\t\tlet contrastColor = data.backgroundColor;\n\n\t\t// If no bg color was found, or it cannot be converted by colorToRgb, check the computed background\n\t\tif( !contrastColor || !colorToRgb( contrastColor ) ) {\n\t\t\tlet computedBackgroundStyle = window.getComputedStyle( element );\n\t\t\tif( computedBackgroundStyle && computedBackgroundStyle.backgroundColor ) {\n\t\t\t\tcontrastColor = computedBackgroundStyle.backgroundColor;\n\t\t\t}\n\t\t}\n\n\t\tif( contrastColor ) {\n\t\t\tconst rgb = colorToRgb( contrastColor );\n\n\t\t\t// Ignore fully transparent backgrounds. Some browsers return\n\t\t\t// rgba(0,0,0,0) when reading the computed background color of\n\t\t\t// an element with no background\n\t\t\tif( rgb && rgb.a !== 0 ) {\n\t\t\t\tif( colorBrightness( contrastColor ) < 128 ) {\n\t\t\t\t\tslide.classList.add( 'has-dark-background' );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tslide.classList.add( 'has-light-background' );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\t/**\n\t * Updates the background elements to reflect the current\n\t * slide.\n\t *\n\t * @param {boolean} includeAll If true, the backgrounds of\n\t * all vertical slides (not just the present) will be updated.\n\t */\n\tupdate( includeAll = false ) {\n\n\t\tlet currentSlide = this.Reveal.getCurrentSlide();\n\t\tlet indices = this.Reveal.getIndices();\n\n\t\tlet currentBackground = null;\n\n\t\t// Reverse past/future classes when in RTL mode\n\t\tlet horizontalPast = this.Reveal.getConfig().rtl ? 'future' : 'past',\n\t\t\thorizontalFuture = this.Reveal.getConfig().rtl ? 
'past' : 'future';\n\n\t\t// Update the classes of all backgrounds to match the\n\t\t// states of their slides (past/present/future)\n\t\tArray.from( this.element.childNodes ).forEach( ( backgroundh, h ) => {\n\n\t\t\tbackgroundh.classList.remove( 'past', 'present', 'future' );\n\n\t\t\tif( h < indices.h ) {\n\t\t\t\tbackgroundh.classList.add( horizontalPast );\n\t\t\t}\n\t\t\telse if ( h > indices.h ) {\n\t\t\t\tbackgroundh.classList.add( horizontalFuture );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbackgroundh.classList.add( 'present' );\n\n\t\t\t\t// Store a reference to the current background element\n\t\t\t\tcurrentBackground = backgroundh;\n\t\t\t}\n\n\t\t\tif( includeAll || h === indices.h ) {\n\t\t\t\tqueryAll( backgroundh, '.slide-background' ).forEach( ( backgroundv, v ) => {\n\n\t\t\t\t\tbackgroundv.classList.remove( 'past', 'present', 'future' );\n\n\t\t\t\t\tif( v < indices.v ) {\n\t\t\t\t\t\tbackgroundv.classList.add( 'past' );\n\t\t\t\t\t}\n\t\t\t\t\telse if ( v > indices.v ) {\n\t\t\t\t\t\tbackgroundv.classList.add( 'future' );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tbackgroundv.classList.add( 'present' );\n\n\t\t\t\t\t\t// Only if this is the present horizontal and vertical slide\n\t\t\t\t\t\tif( h === indices.h ) currentBackground = backgroundv;\n\t\t\t\t\t}\n\n\t\t\t\t} );\n\t\t\t}\n\n\t\t} );\n\n\t\t// Stop content inside of previous backgrounds\n\t\tif( this.previousBackground ) {\n\n\t\t\tthis.Reveal.slideContent.stopEmbeddedContent( this.previousBackground, { unloadIframes: !this.Reveal.slideContent.shouldPreload( this.previousBackground ) } );\n\n\t\t}\n\n\t\t// Start content in the current background\n\t\tif( currentBackground ) {\n\n\t\t\tthis.Reveal.slideContent.startEmbeddedContent( currentBackground );\n\n\t\t\tlet currentBackgroundContent = currentBackground.querySelector( '.slide-background-content' );\n\t\t\tif( currentBackgroundContent ) {\n\n\t\t\t\tlet backgroundImageURL = currentBackgroundContent.style.backgroundImage || 
'';\n\n\t\t\t\t// Restart GIFs (doesn't work in Firefox)\n\t\t\t\tif( /\\.gif/i.test( backgroundImageURL ) ) {\n\t\t\t\t\tcurrentBackgroundContent.style.backgroundImage = '';\n\t\t\t\t\twindow.getComputedStyle( currentBackgroundContent ).opacity;\n\t\t\t\t\tcurrentBackgroundContent.style.backgroundImage = backgroundImageURL;\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// Don't transition between identical backgrounds. This\n\t\t\t// prevents unwanted flicker.\n\t\t\tlet previousBackgroundHash = this.previousBackground ? this.previousBackground.getAttribute( 'data-background-hash' ) : null;\n\t\t\tlet currentBackgroundHash = currentBackground.getAttribute( 'data-background-hash' );\n\t\t\tif( currentBackgroundHash && currentBackgroundHash === previousBackgroundHash && currentBackground !== this.previousBackground ) {\n\t\t\t\tthis.element.classList.add( 'no-transition' );\n\t\t\t}\n\n\t\t\tthis.previousBackground = currentBackground;\n\n\t\t}\n\n\t\t// If there's a background brightness flag for this slide,\n\t\t// bubble it to the .reveal container\n\t\tif( currentSlide ) {\n\t\t\t[ 'has-light-background', 'has-dark-background' ].forEach( classToBubble => {\n\t\t\t\tif( currentSlide.classList.contains( classToBubble ) ) {\n\t\t\t\t\tthis.Reveal.getRevealElement().classList.add( classToBubble );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis.Reveal.getRevealElement().classList.remove( classToBubble );\n\t\t\t\t}\n\t\t\t}, this );\n\t\t}\n\n\t\t// Allow the first background to apply without transition\n\t\tsetTimeout( () => {\n\t\t\tthis.element.classList.remove( 'no-transition' );\n\t\t}, 1 );\n\n\t}\n\n\t/**\n\t * Updates the position of the parallax background based\n\t * on the current slide index.\n\t */\n\tupdateParallax() {\n\n\t\tlet indices = this.Reveal.getIndices();\n\n\t\tif( this.Reveal.getConfig().parallaxBackgroundImage ) {\n\n\t\t\tlet horizontalSlides = this.Reveal.getHorizontalSlides(),\n\t\t\t\tverticalSlides = this.Reveal.getVerticalSlides();\n\n\t\t\tlet 
backgroundSize = this.element.style.backgroundSize.split( ' ' ),\n\t\t\t\tbackgroundWidth, backgroundHeight;\n\n\t\t\tif( backgroundSize.length === 1 ) {\n\t\t\t\tbackgroundWidth = backgroundHeight = parseInt( backgroundSize[0], 10 );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tbackgroundWidth = parseInt( backgroundSize[0], 10 );\n\t\t\t\tbackgroundHeight = parseInt( backgroundSize[1], 10 );\n\t\t\t}\n\n\t\t\tlet slideWidth = this.element.offsetWidth,\n\t\t\t\thorizontalSlideCount = horizontalSlides.length,\n\t\t\t\thorizontalOffsetMultiplier,\n\t\t\t\thorizontalOffset;\n\n\t\t\tif( typeof this.Reveal.getConfig().parallaxBackgroundHorizontal === 'number' ) {\n\t\t\t\thorizontalOffsetMultiplier = this.Reveal.getConfig().parallaxBackgroundHorizontal;\n\t\t\t}\n\t\t\telse {\n\t\t\t\thorizontalOffsetMultiplier = horizontalSlideCount > 1 ? ( backgroundWidth - slideWidth ) / ( horizontalSlideCount-1 ) : 0;\n\t\t\t}\n\n\t\t\thorizontalOffset = horizontalOffsetMultiplier * indices.h * -1;\n\n\t\t\tlet slideHeight = this.element.offsetHeight,\n\t\t\t\tverticalSlideCount = verticalSlides.length,\n\t\t\t\tverticalOffsetMultiplier,\n\t\t\t\tverticalOffset;\n\n\t\t\tif( typeof this.Reveal.getConfig().parallaxBackgroundVertical === 'number' ) {\n\t\t\t\tverticalOffsetMultiplier = this.Reveal.getConfig().parallaxBackgroundVertical;\n\t\t\t}\n\t\t\telse {\n\t\t\t\tverticalOffsetMultiplier = ( backgroundHeight - slideHeight ) / ( verticalSlideCount-1 );\n\t\t\t}\n\n\t\t\tverticalOffset = verticalSlideCount > 0 ? 
verticalOffsetMultiplier * indices.v : 0;\n\n\t\t\tthis.element.style.backgroundPosition = horizontalOffset + 'px ' + -verticalOffset + 'px';\n\n\t\t}\n\n\t}\n\n\tdestroy() {\n\n\t\tthis.element.remove();\n\n\t}\n\n}\n","\nexport const SLIDES_SELECTOR = '.slides section';\nexport const HORIZONTAL_SLIDES_SELECTOR = '.slides>section';\nexport const VERTICAL_SLIDES_SELECTOR = '.slides>section.present>section';\n\n// Methods that may not be invoked via the postMessage API\nexport const POST_MESSAGE_METHOD_BLACKLIST = /registerPlugin|registerKeyboardShortcut|addKeyBinding|addEventListener|showPreview/;\n\n// Regex for retrieving the fragment style from a class attribute\nexport const FRAGMENT_STYLE_REGEX = /fade-(down|up|right|left|out|in-then-out|in-then-semi-out)|semi-fade-out|current-visible|shrink|grow/;","import { queryAll, extend, createStyleSheet, matches, closest } from '../utils/util.js'\nimport { FRAGMENT_STYLE_REGEX } from '../utils/constants.js'\n\n// Counter used to generate unique IDs for auto-animated elements\nlet autoAnimateCounter = 0;\n\n/**\n * Automatically animates matching elements across\n * slides with the [data-auto-animate] attribute.\n */\nexport default class AutoAnimate {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t}\n\n\t/**\n\t * Runs an auto-animation between the given slides.\n\t *\n\t * @param {HTMLElement} fromSlide\n\t * @param {HTMLElement} toSlide\n\t */\n\trun( fromSlide, toSlide ) {\n\n\t\t// Clean up after prior animations\n\t\tthis.reset();\n\n\t\tlet allSlides = this.Reveal.getSlides();\n\t\tlet toSlideIndex = allSlides.indexOf( toSlide );\n\t\tlet fromSlideIndex = allSlides.indexOf( fromSlide );\n\n\t\t// Ensure that both slides are auto-animate targets with the same data-auto-animate-id value\n\t\t// (including null if absent on both) and that data-auto-animate-restart isn't set on the\n\t\t// physically latter slide (independent of slide direction)\n\t\tif( fromSlide.hasAttribute( 'data-auto-animate' ) && 
toSlide.hasAttribute( 'data-auto-animate' )\n\t\t\t\t&& fromSlide.getAttribute( 'data-auto-animate-id' ) === toSlide.getAttribute( 'data-auto-animate-id' ) \n\t\t\t\t&& !( toSlideIndex > fromSlideIndex ? toSlide : fromSlide ).hasAttribute( 'data-auto-animate-restart' ) ) {\n\n\t\t\t// Create a new auto-animate sheet\n\t\t\tthis.autoAnimateStyleSheet = this.autoAnimateStyleSheet || createStyleSheet();\n\n\t\t\tlet animationOptions = this.getAutoAnimateOptions( toSlide );\n\n\t\t\t// Set our starting state\n\t\t\tfromSlide.dataset.autoAnimate = 'pending';\n\t\t\ttoSlide.dataset.autoAnimate = 'pending';\n\n\t\t\t// Flag the navigation direction, needed for fragment buildup\n\t\t\tanimationOptions.slideDirection = toSlideIndex > fromSlideIndex ? 'forward' : 'backward';\n\n\t\t\t// If the from-slide is hidden because it has moved outside\n\t\t\t// the view distance, we need to temporarily show it while\n\t\t\t// measuring\n\t\t\tlet fromSlideIsHidden = fromSlide.style.display === 'none';\n\t\t\tif( fromSlideIsHidden ) fromSlide.style.display = this.Reveal.getConfig().display;\n\n\t\t\t// Inject our auto-animate styles for this transition\n\t\t\tlet css = this.getAutoAnimatableElements( fromSlide, toSlide ).map( elements => {\n\t\t\t\treturn this.autoAnimateElements( elements.from, elements.to, elements.options || {}, animationOptions, autoAnimateCounter++ );\n\t\t\t} );\n\n\t\t\tif( fromSlideIsHidden ) fromSlide.style.display = 'none';\n\n\t\t\t// Animate unmatched elements, if enabled\n\t\t\tif( toSlide.dataset.autoAnimateUnmatched !== 'false' && this.Reveal.getConfig().autoAnimateUnmatched === true ) {\n\n\t\t\t\t// Our default timings for unmatched elements\n\t\t\t\tlet defaultUnmatchedDuration = animationOptions.duration * 0.8,\n\t\t\t\t\tdefaultUnmatchedDelay = animationOptions.duration * 0.2;\n\n\t\t\t\tthis.getUnmatchedAutoAnimateElements( toSlide ).forEach( unmatchedElement => {\n\n\t\t\t\t\tlet unmatchedOptions = this.getAutoAnimateOptions( unmatchedElement, 
animationOptions );\n\t\t\t\t\tlet id = 'unmatched';\n\n\t\t\t\t\t// If there is a duration or delay set specifically for this\n\t\t\t\t\t// element our unmatched elements should adhere to those\n\t\t\t\t\tif( unmatchedOptions.duration !== animationOptions.duration || unmatchedOptions.delay !== animationOptions.delay ) {\n\t\t\t\t\t\tid = 'unmatched-' + autoAnimateCounter++;\n\t\t\t\t\t\tcss.push( `[data-auto-animate=\"running\"] [data-auto-animate-target=\"${id}\"] { transition: opacity ${unmatchedOptions.duration}s ease ${unmatchedOptions.delay}s; }` );\n\t\t\t\t\t}\n\n\t\t\t\t\tunmatchedElement.dataset.autoAnimateTarget = id;\n\n\t\t\t\t}, this );\n\n\t\t\t\t// Our default transition for unmatched elements\n\t\t\t\tcss.push( `[data-auto-animate=\"running\"] [data-auto-animate-target=\"unmatched\"] { transition: opacity ${defaultUnmatchedDuration}s ease ${defaultUnmatchedDelay}s; }` );\n\n\t\t\t}\n\n\t\t\t// Setting the whole chunk of CSS at once is the most\n\t\t\t// efficient way to do this. 
Using sheet.insertRule\n\t\t\t// is multiple factors slower.\n\t\t\tthis.autoAnimateStyleSheet.innerHTML = css.join( '' );\n\n\t\t\t// Start the animation next cycle\n\t\t\trequestAnimationFrame( () => {\n\t\t\t\tif( this.autoAnimateStyleSheet ) {\n\t\t\t\t\t// This forces our newly injected styles to be applied in Firefox\n\t\t\t\t\tgetComputedStyle( this.autoAnimateStyleSheet ).fontWeight;\n\n\t\t\t\t\ttoSlide.dataset.autoAnimate = 'running';\n\t\t\t\t}\n\t\t\t} );\n\n\t\t\tthis.Reveal.dispatchEvent({\n\t\t\t\ttype: 'autoanimate',\n\t\t\t\tdata: {\n\t\t\t\t\tfromSlide,\n\t\t\t\t\ttoSlide,\n\t\t\t\t\tsheet: this.autoAnimateStyleSheet\n\t\t\t\t}\n\t\t\t});\n\n\t\t}\n\n\t}\n\n\t/**\n\t * Rolls back all changes that we've made to the DOM so\n\t * that as part of animating.\n\t */\n\treset() {\n\n\t\t// Reset slides\n\t\tqueryAll( this.Reveal.getRevealElement(), '[data-auto-animate]:not([data-auto-animate=\"\"])' ).forEach( element => {\n\t\t\telement.dataset.autoAnimate = '';\n\t\t} );\n\n\t\t// Reset elements\n\t\tqueryAll( this.Reveal.getRevealElement(), '[data-auto-animate-target]' ).forEach( element => {\n\t\t\tdelete element.dataset.autoAnimateTarget;\n\t\t} );\n\n\t\t// Remove the animation sheet\n\t\tif( this.autoAnimateStyleSheet && this.autoAnimateStyleSheet.parentNode ) {\n\t\t\tthis.autoAnimateStyleSheet.parentNode.removeChild( this.autoAnimateStyleSheet );\n\t\t\tthis.autoAnimateStyleSheet = null;\n\t\t}\n\n\t}\n\n\t/**\n\t * Creates a FLIP animation where the `to` element starts out\n\t * in the `from` element position and animates to its original\n\t * state.\n\t *\n\t * @param {HTMLElement} from\n\t * @param {HTMLElement} to\n\t * @param {Object} elementOptions Options for this element pair\n\t * @param {Object} animationOptions Options set at the slide level\n\t * @param {String} id Unique ID that we can use to identify this\n\t * auto-animate element in the DOM\n\t */\n\tautoAnimateElements( from, to, elementOptions, animationOptions, id ) 
{\n\n\t\t// 'from' elements are given a data-auto-animate-target with no value,\n\t\t// 'to' elements are are given a data-auto-animate-target with an ID\n\t\tfrom.dataset.autoAnimateTarget = '';\n\t\tto.dataset.autoAnimateTarget = id;\n\n\t\t// Each element may override any of the auto-animate options\n\t\t// like transition easing, duration and delay via data-attributes\n\t\tlet options = this.getAutoAnimateOptions( to, animationOptions );\n\n\t\t// If we're using a custom element matcher the element options\n\t\t// may contain additional transition overrides\n\t\tif( typeof elementOptions.delay !== 'undefined' ) options.delay = elementOptions.delay;\n\t\tif( typeof elementOptions.duration !== 'undefined' ) options.duration = elementOptions.duration;\n\t\tif( typeof elementOptions.easing !== 'undefined' ) options.easing = elementOptions.easing;\n\n\t\tlet fromProps = this.getAutoAnimatableProperties( 'from', from, elementOptions ),\n\t\t\ttoProps = this.getAutoAnimatableProperties( 'to', to, elementOptions );\n\n\t\t// Maintain fragment visibility for matching elements when\n\t\t// we're navigating forwards, this way the viewer won't need\n\t\t// to step through the same fragments twice\n\t\tif( to.classList.contains( 'fragment' ) ) {\n\n\t\t\t// Don't auto-animate the opacity of fragments to avoid\n\t\t\t// conflicts with fragment animations\n\t\t\tdelete toProps.styles['opacity'];\n\n\t\t\tif( from.classList.contains( 'fragment' ) ) {\n\n\t\t\t\tlet fromFragmentStyle = ( from.className.match( FRAGMENT_STYLE_REGEX ) || [''] )[0];\n\t\t\t\tlet toFragmentStyle = ( to.className.match( FRAGMENT_STYLE_REGEX ) || [''] )[0];\n\n\t\t\t\t// Only skip the fragment if the fragment animation style\n\t\t\t\t// remains unchanged\n\t\t\t\tif( fromFragmentStyle === toFragmentStyle && animationOptions.slideDirection === 'forward' ) {\n\t\t\t\t\tto.classList.add( 'visible', 'disabled' );\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t\t// If translation and/or scaling are enabled, css 
transform\n\t\t// the 'to' element so that it matches the position and size\n\t\t// of the 'from' element\n\t\tif( elementOptions.translate !== false || elementOptions.scale !== false ) {\n\n\t\t\tlet presentationScale = this.Reveal.getScale();\n\n\t\t\tlet delta = {\n\t\t\t\tx: ( fromProps.x - toProps.x ) / presentationScale,\n\t\t\t\ty: ( fromProps.y - toProps.y ) / presentationScale,\n\t\t\t\tscaleX: fromProps.width / toProps.width,\n\t\t\t\tscaleY: fromProps.height / toProps.height\n\t\t\t};\n\n\t\t\t// Limit decimal points to avoid 0.0001px blur and stutter\n\t\t\tdelta.x = Math.round( delta.x * 1000 ) / 1000;\n\t\t\tdelta.y = Math.round( delta.y * 1000 ) / 1000;\n\t\t\tdelta.scaleX = Math.round( delta.scaleX * 1000 ) / 1000;\n\t\t\tdelta.scaleX = Math.round( delta.scaleX * 1000 ) / 1000;\n\n\t\t\tlet translate = elementOptions.translate !== false && ( delta.x !== 0 || delta.y !== 0 ),\n\t\t\t\tscale = elementOptions.scale !== false && ( delta.scaleX !== 0 || delta.scaleY !== 0 );\n\n\t\t\t// No need to transform if nothing's changed\n\t\t\tif( translate || scale ) {\n\n\t\t\t\tlet transform = [];\n\n\t\t\t\tif( translate ) transform.push( `translate(${delta.x}px, ${delta.y}px)` );\n\t\t\t\tif( scale ) transform.push( `scale(${delta.scaleX}, ${delta.scaleY})` );\n\n\t\t\t\tfromProps.styles['transform'] = transform.join( ' ' );\n\t\t\t\tfromProps.styles['transform-origin'] = 'top left';\n\n\t\t\t\ttoProps.styles['transform'] = 'none';\n\n\t\t\t}\n\n\t\t}\n\n\t\t// Delete all unchanged 'to' styles\n\t\tfor( let propertyName in toProps.styles ) {\n\t\t\tconst toValue = toProps.styles[propertyName];\n\t\t\tconst fromValue = fromProps.styles[propertyName];\n\n\t\t\tif( toValue === fromValue ) {\n\t\t\t\tdelete toProps.styles[propertyName];\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// If these property values were set via a custom matcher providing\n\t\t\t\t// an explicit 'from' and/or 'to' value, we always inject those values.\n\t\t\t\tif( toValue.explicitValue === true ) 
{\n\t\t\t\t\ttoProps.styles[propertyName] = toValue.value;\n\t\t\t\t}\n\n\t\t\t\tif( fromValue.explicitValue === true ) {\n\t\t\t\t\tfromProps.styles[propertyName] = fromValue.value;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tlet css = '';\n\n\t\tlet toStyleProperties = Object.keys( toProps.styles );\n\n\t\t// Only create animate this element IF at least one style\n\t\t// property has changed\n\t\tif( toStyleProperties.length > 0 ) {\n\n\t\t\t// Instantly move to the 'from' state\n\t\t\tfromProps.styles['transition'] = 'none';\n\n\t\t\t// Animate towards the 'to' state\n\t\t\ttoProps.styles['transition'] = `all ${options.duration}s ${options.easing} ${options.delay}s`;\n\t\t\ttoProps.styles['transition-property'] = toStyleProperties.join( ', ' );\n\t\t\ttoProps.styles['will-change'] = toStyleProperties.join( ', ' );\n\n\t\t\t// Build up our custom CSS. We need to override inline styles\n\t\t\t// so we need to make our styles vErY IMPORTANT!1!!\n\t\t\tlet fromCSS = Object.keys( fromProps.styles ).map( propertyName => {\n\t\t\t\treturn propertyName + ': ' + fromProps.styles[propertyName] + ' !important;';\n\t\t\t} ).join( '' );\n\n\t\t\tlet toCSS = Object.keys( toProps.styles ).map( propertyName => {\n\t\t\t\treturn propertyName + ': ' + toProps.styles[propertyName] + ' !important;';\n\t\t\t} ).join( '' );\n\n\t\t\tcss = \t'[data-auto-animate-target=\"'+ id +'\"] {'+ fromCSS +'}' +\n\t\t\t\t\t'[data-auto-animate=\"running\"] [data-auto-animate-target=\"'+ id +'\"] {'+ toCSS +'}';\n\n\t\t}\n\n\t\treturn css;\n\n\t}\n\n\t/**\n\t * Returns the auto-animate options for the given element.\n\t *\n\t * @param {HTMLElement} element Element to pick up options\n\t * from, either a slide or an animation target\n\t * @param {Object} [inheritedOptions] Optional set of existing\n\t * options\n\t */\n\tgetAutoAnimateOptions( element, inheritedOptions ) {\n\n\t\tlet options = {\n\t\t\teasing: this.Reveal.getConfig().autoAnimateEasing,\n\t\t\tduration: 
this.Reveal.getConfig().autoAnimateDuration,\n\t\t\tdelay: 0\n\t\t};\n\n\t\toptions = extend( options, inheritedOptions );\n\n\t\t// Inherit options from parent elements\n\t\tif( element.parentNode ) {\n\t\t\tlet autoAnimatedParent = closest( element.parentNode, '[data-auto-animate-target]' );\n\t\t\tif( autoAnimatedParent ) {\n\t\t\t\toptions = this.getAutoAnimateOptions( autoAnimatedParent, options );\n\t\t\t}\n\t\t}\n\n\t\tif( element.dataset.autoAnimateEasing ) {\n\t\t\toptions.easing = element.dataset.autoAnimateEasing;\n\t\t}\n\n\t\tif( element.dataset.autoAnimateDuration ) {\n\t\t\toptions.duration = parseFloat( element.dataset.autoAnimateDuration );\n\t\t}\n\n\t\tif( element.dataset.autoAnimateDelay ) {\n\t\t\toptions.delay = parseFloat( element.dataset.autoAnimateDelay );\n\t\t}\n\n\t\treturn options;\n\n\t}\n\n\t/**\n\t * Returns an object containing all of the properties\n\t * that can be auto-animated for the given element and\n\t * their current computed values.\n\t *\n\t * @param {String} direction 'from' or 'to'\n\t */\n\tgetAutoAnimatableProperties( direction, element, elementOptions ) {\n\n\t\tlet config = this.Reveal.getConfig();\n\n\t\tlet properties = { styles: [] };\n\n\t\t// Position and size\n\t\tif( elementOptions.translate !== false || elementOptions.scale !== false ) {\n\t\t\tlet bounds;\n\n\t\t\t// Custom auto-animate may optionally return a custom tailored\n\t\t\t// measurement function\n\t\t\tif( typeof elementOptions.measure === 'function' ) {\n\t\t\t\tbounds = elementOptions.measure( element );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tif( config.center ) {\n\t\t\t\t\t// More precise, but breaks when used in combination\n\t\t\t\t\t// with zoom for scaling the deck ¯\\_(ツ)_/¯\n\t\t\t\t\tbounds = element.getBoundingClientRect();\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tlet scale = this.Reveal.getScale();\n\t\t\t\t\tbounds = {\n\t\t\t\t\t\tx: element.offsetLeft * scale,\n\t\t\t\t\t\ty: element.offsetTop * scale,\n\t\t\t\t\t\twidth: 
element.offsetWidth * scale,\n\t\t\t\t\t\theight: element.offsetHeight * scale\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tproperties.x = bounds.x;\n\t\t\tproperties.y = bounds.y;\n\t\t\tproperties.width = bounds.width;\n\t\t\tproperties.height = bounds.height;\n\t\t}\n\n\t\tconst computedStyles = getComputedStyle( element );\n\n\t\t// CSS styles\n\t\t( elementOptions.styles || config.autoAnimateStyles ).forEach( style => {\n\t\t\tlet value;\n\n\t\t\t// `style` is either the property name directly, or an object\n\t\t\t// definition of a style property\n\t\t\tif( typeof style === 'string' ) style = { property: style };\n\n\t\t\tif( typeof style.from !== 'undefined' && direction === 'from' ) {\n\t\t\t\tvalue = { value: style.from, explicitValue: true };\n\t\t\t}\n\t\t\telse if( typeof style.to !== 'undefined' && direction === 'to' ) {\n\t\t\t\tvalue = { value: style.to, explicitValue: true };\n\t\t\t}\n\t\t\telse {\n\t\t\t\t// Use a unitless value for line-height so that it inherits properly\n\t\t\t\tif( style.property === 'line-height' ) {\n\t\t\t\t\tvalue = parseFloat( computedStyles['line-height'] ) / parseFloat( computedStyles['font-size'] );\n\t\t\t\t}\n\n\t\t\t\tif( isNaN(value) ) {\n\t\t\t\t\tvalue = computedStyles[style.property];\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif( value !== '' ) {\n\t\t\t\tproperties.styles[style.property] = value;\n\t\t\t}\n\t\t} );\n\n\t\treturn properties;\n\n\t}\n\n\t/**\n\t * Get a list of all element pairs that we can animate\n\t * between the given slides.\n\t *\n\t * @param {HTMLElement} fromSlide\n\t * @param {HTMLElement} toSlide\n\t *\n\t * @return {Array} Each value is an array where [0] is\n\t * the element we're animating from and [1] is the\n\t * element we're animating to\n\t */\n\tgetAutoAnimatableElements( fromSlide, toSlide ) {\n\n\t\tlet matcher = typeof this.Reveal.getConfig().autoAnimateMatcher === 'function' ? 
this.Reveal.getConfig().autoAnimateMatcher : this.getAutoAnimatePairs;\n\n\t\tlet pairs = matcher.call( this, fromSlide, toSlide );\n\n\t\tlet reserved = [];\n\n\t\t// Remove duplicate pairs\n\t\treturn pairs.filter( ( pair, index ) => {\n\t\t\tif( reserved.indexOf( pair.to ) === -1 ) {\n\t\t\t\treserved.push( pair.to );\n\t\t\t\treturn true;\n\t\t\t}\n\t\t} );\n\n\t}\n\n\t/**\n\t * Identifies matching elements between slides.\n\t *\n\t * You can specify a custom matcher function by using\n\t * the `autoAnimateMatcher` config option.\n\t */\n\tgetAutoAnimatePairs( fromSlide, toSlide ) {\n\n\t\tlet pairs = [];\n\n\t\tconst codeNodes = 'pre';\n\t\tconst textNodes = 'h1, h2, h3, h4, h5, h6, p, li';\n\t\tconst mediaNodes = 'img, video, iframe';\n\n\t\t// Explicit matches via data-id\n\t\tthis.findAutoAnimateMatches( pairs, fromSlide, toSlide, '[data-id]', node => {\n\t\t\treturn node.nodeName + ':::' + node.getAttribute( 'data-id' );\n\t\t} );\n\n\t\t// Text\n\t\tthis.findAutoAnimateMatches( pairs, fromSlide, toSlide, textNodes, node => {\n\t\t\treturn node.nodeName + ':::' + node.innerText;\n\t\t} );\n\n\t\t// Media\n\t\tthis.findAutoAnimateMatches( pairs, fromSlide, toSlide, mediaNodes, node => {\n\t\t\treturn node.nodeName + ':::' + ( node.getAttribute( 'src' ) || node.getAttribute( 'data-src' ) );\n\t\t} );\n\n\t\t// Code\n\t\tthis.findAutoAnimateMatches( pairs, fromSlide, toSlide, codeNodes, node => {\n\t\t\treturn node.nodeName + ':::' + node.innerText;\n\t\t} );\n\n\t\tpairs.forEach( pair => {\n\t\t\t// Disable scale transformations on text nodes, we transition\n\t\t\t// each individual text property instead\n\t\t\tif( matches( pair.from, textNodes ) ) {\n\t\t\t\tpair.options = { scale: false };\n\t\t\t}\n\t\t\t// Animate individual lines of code\n\t\t\telse if( matches( pair.from, codeNodes ) ) {\n\n\t\t\t\t// Transition the code block's width and height instead of scaling\n\t\t\t\t// to prevent its content from being squished\n\t\t\t\tpair.options = { scale: 
false, styles: [ 'width', 'height' ] };\n\n\t\t\t\t// Lines of code\n\t\t\t\tthis.findAutoAnimateMatches( pairs, pair.from, pair.to, '.hljs .hljs-ln-code', node => {\n\t\t\t\t\treturn node.textContent;\n\t\t\t\t}, {\n\t\t\t\t\tscale: false,\n\t\t\t\t\tstyles: [],\n\t\t\t\t\tmeasure: this.getLocalBoundingBox.bind( this )\n\t\t\t\t} );\n\n\t\t\t\t// Line numbers\n\t\t\t\tthis.findAutoAnimateMatches( pairs, pair.from, pair.to, '.hljs .hljs-ln-line[data-line-number]', node => {\n\t\t\t\t\treturn node.getAttribute( 'data-line-number' );\n\t\t\t\t}, {\n\t\t\t\t\tscale: false,\n\t\t\t\t\tstyles: [ 'width' ],\n\t\t\t\t\tmeasure: this.getLocalBoundingBox.bind( this )\n\t\t\t\t} );\n\n\t\t\t}\n\n\t\t}, this );\n\n\t\treturn pairs;\n\n\t}\n\n\t/**\n\t * Helper method which returns a bounding box based on\n\t * the given elements offset coordinates.\n\t *\n\t * @param {HTMLElement} element\n\t * @return {Object} x, y, width, height\n\t */\n\tgetLocalBoundingBox( element ) {\n\n\t\tconst presentationScale = this.Reveal.getScale();\n\n\t\treturn {\n\t\t\tx: Math.round( ( element.offsetLeft * presentationScale ) * 100 ) / 100,\n\t\t\ty: Math.round( ( element.offsetTop * presentationScale ) * 100 ) / 100,\n\t\t\twidth: Math.round( ( element.offsetWidth * presentationScale ) * 100 ) / 100,\n\t\t\theight: Math.round( ( element.offsetHeight * presentationScale ) * 100 ) / 100\n\t\t};\n\n\t}\n\n\t/**\n\t * Finds matching elements between two slides.\n\t *\n\t * @param {Array} pairs \tList of pairs to push matches to\n\t * @param {HTMLElement} fromScope Scope within the from element exists\n\t * @param {HTMLElement} toScope Scope within the to element exists\n\t * @param {String} selector CSS selector of the element to match\n\t * @param {Function} serializer A function that accepts an element and returns\n\t * a stringified ID based on its contents\n\t * @param {Object} animationOptions Optional config options for this pair\n\t */\n\tfindAutoAnimateMatches( pairs, fromScope, toScope, 
selector, serializer, animationOptions ) {\n\n\t\tlet fromMatches = {};\n\t\tlet toMatches = {};\n\n\t\t[].slice.call( fromScope.querySelectorAll( selector ) ).forEach( ( element, i ) => {\n\t\t\tconst key = serializer( element );\n\t\t\tif( typeof key === 'string' && key.length ) {\n\t\t\t\tfromMatches[key] = fromMatches[key] || [];\n\t\t\t\tfromMatches[key].push( element );\n\t\t\t}\n\t\t} );\n\n\t\t[].slice.call( toScope.querySelectorAll( selector ) ).forEach( ( element, i ) => {\n\t\t\tconst key = serializer( element );\n\t\t\ttoMatches[key] = toMatches[key] || [];\n\t\t\ttoMatches[key].push( element );\n\n\t\t\tlet fromElement;\n\n\t\t\t// Retrieve the 'from' element\n\t\t\tif( fromMatches[key] ) {\n\t\t\t\tconst primaryIndex = toMatches[key].length - 1;\n\t\t\t\tconst secondaryIndex = fromMatches[key].length - 1;\n\n\t\t\t\t// If there are multiple identical from elements, retrieve\n\t\t\t\t// the one at the same index as our to-element.\n\t\t\t\tif( fromMatches[key][ primaryIndex ] ) {\n\t\t\t\t\tfromElement = fromMatches[key][ primaryIndex ];\n\t\t\t\t\tfromMatches[key][ primaryIndex ] = null;\n\t\t\t\t}\n\t\t\t\t// If there are no matching from-elements at the same index,\n\t\t\t\t// use the last one.\n\t\t\t\telse if( fromMatches[key][ secondaryIndex ] ) {\n\t\t\t\t\tfromElement = fromMatches[key][ secondaryIndex ];\n\t\t\t\t\tfromMatches[key][ secondaryIndex ] = null;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// If we've got a matching pair, push it to the list of pairs\n\t\t\tif( fromElement ) {\n\t\t\t\tpairs.push({\n\t\t\t\t\tfrom: fromElement,\n\t\t\t\t\tto: element,\n\t\t\t\t\toptions: animationOptions\n\t\t\t\t});\n\t\t\t}\n\t\t} );\n\n\t}\n\n\t/**\n\t * Returns a all elements within the given scope that should\n\t * be considered unmatched in an auto-animate transition. 
If\n\t * fading of unmatched elements is turned on, these elements\n\t * will fade when going between auto-animate slides.\n\t *\n\t * Note that parents of auto-animate targets are NOT considered\n\t * unmatched since fading them would break the auto-animation.\n\t *\n\t * @param {HTMLElement} rootElement\n\t * @return {Array}\n\t */\n\tgetUnmatchedAutoAnimateElements( rootElement ) {\n\n\t\treturn [].slice.call( rootElement.children ).reduce( ( result, element ) => {\n\n\t\t\tconst containsAnimatedElements = element.querySelector( '[data-auto-animate-target]' );\n\n\t\t\t// The element is unmatched if\n\t\t\t// - It is not an auto-animate target\n\t\t\t// - It does not contain any auto-animate targets\n\t\t\tif( !element.hasAttribute( 'data-auto-animate-target' ) && !containsAnimatedElements ) {\n\t\t\t\tresult.push( element );\n\t\t\t}\n\n\t\t\tif( element.querySelector( '[data-auto-animate-target]' ) ) {\n\t\t\t\tresult = result.concat( this.getUnmatchedAutoAnimateElements( element ) );\n\t\t\t}\n\n\t\t\treturn result;\n\n\t\t}, [] );\n\n\t}\n\n}\n","import { extend, queryAll } from '../utils/util.js'\n\n/**\n * Handles sorting and navigation of slide fragments.\n * Fragments are elements within a slide that are\n * revealed/animated incrementally.\n */\nexport default class Fragments {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t}\n\n\t/**\n\t * Called when the reveal.js config is updated.\n\t */\n\tconfigure( config, oldConfig ) {\n\n\t\tif( config.fragments === false ) {\n\t\t\tthis.disable();\n\t\t}\n\t\telse if( oldConfig.fragments === false ) {\n\t\t\tthis.enable();\n\t\t}\n\n\t}\n\n\t/**\n\t * If fragments are disabled in the deck, they should all be\n\t * visible rather than stepped through.\n\t */\n\tdisable() {\n\n\t\tqueryAll( this.Reveal.getSlidesElement(), '.fragment' ).forEach( element => {\n\t\t\telement.classList.add( 'visible' );\n\t\t\telement.classList.remove( 'current-fragment' );\n\t\t} );\n\n\t}\n\n\t/**\n\t * Reverse of 
#disable(). Only called if fragments have\n\t * previously been disabled.\n\t */\n\tenable() {\n\n\t\tqueryAll( this.Reveal.getSlidesElement(), '.fragment' ).forEach( element => {\n\t\t\telement.classList.remove( 'visible' );\n\t\t\telement.classList.remove( 'current-fragment' );\n\t\t} );\n\n\t}\n\n\t/**\n\t * Returns an object describing the available fragment\n\t * directions.\n\t *\n\t * @return {{prev: boolean, next: boolean}}\n\t */\n\tavailableRoutes() {\n\n\t\tlet currentSlide = this.Reveal.getCurrentSlide();\n\t\tif( currentSlide && this.Reveal.getConfig().fragments ) {\n\t\t\tlet fragments = currentSlide.querySelectorAll( '.fragment:not(.disabled)' );\n\t\t\tlet hiddenFragments = currentSlide.querySelectorAll( '.fragment:not(.disabled):not(.visible)' );\n\n\t\t\treturn {\n\t\t\t\tprev: fragments.length - hiddenFragments.length > 0,\n\t\t\t\tnext: !!hiddenFragments.length\n\t\t\t};\n\t\t}\n\t\telse {\n\t\t\treturn { prev: false, next: false };\n\t\t}\n\n\t}\n\n\t/**\n\t * Return a sorted fragments list, ordered by an increasing\n\t * \"data-fragment-index\" attribute.\n\t *\n\t * Fragments will be revealed in the order that they are returned by\n\t * this function, so you can use the index attributes to control the\n\t * order of fragment appearance.\n\t *\n\t * To maintain a sensible default fragment order, fragments are presumed\n\t * to be passed in document order. 
This function adds a \"fragment-index\"\n\t * attribute to each node if such an attribute is not already present,\n\t * and sets that attribute to an integer value which is the position of\n\t * the fragment within the fragments list.\n\t *\n\t * @param {object[]|*} fragments\n\t * @param {boolean} grouped If true the returned array will contain\n\t * nested arrays for all fragments with the same index\n\t * @return {object[]} sorted Sorted array of fragments\n\t */\n\tsort( fragments, grouped = false ) {\n\n\t\tfragments = Array.from( fragments );\n\n\t\tlet ordered = [],\n\t\t\tunordered = [],\n\t\t\tsorted = [];\n\n\t\t// Group ordered and unordered elements\n\t\tfragments.forEach( fragment => {\n\t\t\tif( fragment.hasAttribute( 'data-fragment-index' ) ) {\n\t\t\t\tlet index = parseInt( fragment.getAttribute( 'data-fragment-index' ), 10 );\n\n\t\t\t\tif( !ordered[index] ) {\n\t\t\t\t\tordered[index] = [];\n\t\t\t\t}\n\n\t\t\t\tordered[index].push( fragment );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tunordered.push( [ fragment ] );\n\t\t\t}\n\t\t} );\n\n\t\t// Append fragments without explicit indices in their\n\t\t// DOM order\n\t\tordered = ordered.concat( unordered );\n\n\t\t// Manually count the index up per group to ensure there\n\t\t// are no gaps\n\t\tlet index = 0;\n\n\t\t// Push all fragments in their sorted order to an array,\n\t\t// this flattens the groups\n\t\tordered.forEach( group => {\n\t\t\tgroup.forEach( fragment => {\n\t\t\t\tsorted.push( fragment );\n\t\t\t\tfragment.setAttribute( 'data-fragment-index', index );\n\t\t\t} );\n\n\t\t\tindex ++;\n\t\t} );\n\n\t\treturn grouped === true ? 
ordered : sorted;\n\n\t}\n\n\t/**\n\t * Sorts and formats all of fragments in the\n\t * presentation.\n\t */\n\tsortAll() {\n\n\t\tthis.Reveal.getHorizontalSlides().forEach( horizontalSlide => {\n\n\t\t\tlet verticalSlides = queryAll( horizontalSlide, 'section' );\n\t\t\tverticalSlides.forEach( ( verticalSlide, y ) => {\n\n\t\t\t\tthis.sort( verticalSlide.querySelectorAll( '.fragment' ) );\n\n\t\t\t}, this );\n\n\t\t\tif( verticalSlides.length === 0 ) this.sort( horizontalSlide.querySelectorAll( '.fragment' ) );\n\n\t\t} );\n\n\t}\n\n\t/**\n\t * Refreshes the fragments on the current slide so that they\n\t * have the appropriate classes (.visible + .current-fragment).\n\t *\n\t * @param {number} [index] The index of the current fragment\n\t * @param {array} [fragments] Array containing all fragments\n\t * in the current slide\n\t *\n\t * @return {{shown: array, hidden: array}}\n\t */\n\tupdate( index, fragments ) {\n\n\t\tlet changedFragments = {\n\t\t\tshown: [],\n\t\t\thidden: []\n\t\t};\n\n\t\tlet currentSlide = this.Reveal.getCurrentSlide();\n\t\tif( currentSlide && this.Reveal.getConfig().fragments ) {\n\n\t\t\tfragments = fragments || this.sort( currentSlide.querySelectorAll( '.fragment' ) );\n\n\t\t\tif( fragments.length ) {\n\n\t\t\t\tlet maxIndex = 0;\n\n\t\t\t\tif( typeof index !== 'number' ) {\n\t\t\t\t\tlet currentFragment = this.sort( currentSlide.querySelectorAll( '.fragment.visible' ) ).pop();\n\t\t\t\t\tif( currentFragment ) {\n\t\t\t\t\t\tindex = parseInt( currentFragment.getAttribute( 'data-fragment-index' ) || 0, 10 );\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tArray.from( fragments ).forEach( ( el, i ) => {\n\n\t\t\t\t\tif( el.hasAttribute( 'data-fragment-index' ) ) {\n\t\t\t\t\t\ti = parseInt( el.getAttribute( 'data-fragment-index' ), 10 );\n\t\t\t\t\t}\n\n\t\t\t\t\tmaxIndex = Math.max( maxIndex, i );\n\n\t\t\t\t\t// Visible fragments\n\t\t\t\t\tif( i <= index ) {\n\t\t\t\t\t\tlet wasVisible = el.classList.contains( 'visible' 
)\n\t\t\t\t\t\tel.classList.add( 'visible' );\n\t\t\t\t\t\tel.classList.remove( 'current-fragment' );\n\n\t\t\t\t\t\tif( i === index ) {\n\t\t\t\t\t\t\t// Announce the fragments one by one to the Screen Reader\n\t\t\t\t\t\t\tthis.Reveal.announceStatus( this.Reveal.getStatusText( el ) );\n\n\t\t\t\t\t\t\tel.classList.add( 'current-fragment' );\n\t\t\t\t\t\t\tthis.Reveal.slideContent.startEmbeddedContent( el );\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif( !wasVisible ) {\n\t\t\t\t\t\t\tchangedFragments.shown.push( el )\n\t\t\t\t\t\t\tthis.Reveal.dispatchEvent({\n\t\t\t\t\t\t\t\ttarget: el,\n\t\t\t\t\t\t\t\ttype: 'visible',\n\t\t\t\t\t\t\t\tbubbles: false\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// Hidden fragments\n\t\t\t\t\telse {\n\t\t\t\t\t\tlet wasVisible = el.classList.contains( 'visible' )\n\t\t\t\t\t\tel.classList.remove( 'visible' );\n\t\t\t\t\t\tel.classList.remove( 'current-fragment' );\n\n\t\t\t\t\t\tif( wasVisible ) {\n\t\t\t\t\t\t\tthis.Reveal.slideContent.stopEmbeddedContent( el );\n\t\t\t\t\t\t\tchangedFragments.hidden.push( el );\n\t\t\t\t\t\t\tthis.Reveal.dispatchEvent({\n\t\t\t\t\t\t\t\ttarget: el,\n\t\t\t\t\t\t\t\ttype: 'hidden',\n\t\t\t\t\t\t\t\tbubbles: false\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t} );\n\n\t\t\t\t// Write the current fragment index to the slide
.\n\t\t\t\t// This can be used by end users to apply styles based on\n\t\t\t\t// the current fragment index.\n\t\t\t\tindex = typeof index === 'number' ? index : -1;\n\t\t\t\tindex = Math.max( Math.min( index, maxIndex ), -1 );\n\t\t\t\tcurrentSlide.setAttribute( 'data-fragment', index );\n\n\t\t\t}\n\n\t\t}\n\n\t\treturn changedFragments;\n\n\t}\n\n\t/**\n\t * Formats the fragments on the given slide so that they have\n\t * valid indices. Call this if fragments are changed in the DOM\n\t * after reveal.js has already initialized.\n\t *\n\t * @param {HTMLElement} slide\n\t * @return {Array} a list of the HTML fragments that were synced\n\t */\n\tsync( slide = this.Reveal.getCurrentSlide() ) {\n\n\t\treturn this.sort( slide.querySelectorAll( '.fragment' ) );\n\n\t}\n\n\t/**\n\t * Navigate to the specified slide fragment.\n\t *\n\t * @param {?number} index The index of the fragment that\n\t * should be shown, -1 means all are invisible\n\t * @param {number} offset Integer offset to apply to the\n\t * fragment index\n\t *\n\t * @return {boolean} true if a change was made in any\n\t * fragments visibility as part of this call\n\t */\n\tgoto( index, offset = 0 ) {\n\n\t\tlet currentSlide = this.Reveal.getCurrentSlide();\n\t\tif( currentSlide && this.Reveal.getConfig().fragments ) {\n\n\t\t\tlet fragments = this.sort( currentSlide.querySelectorAll( '.fragment:not(.disabled)' ) );\n\t\t\tif( fragments.length ) {\n\n\t\t\t\t// If no index is specified, find the current\n\t\t\t\tif( typeof index !== 'number' ) {\n\t\t\t\t\tlet lastVisibleFragment = this.sort( currentSlide.querySelectorAll( '.fragment:not(.disabled).visible' ) ).pop();\n\n\t\t\t\t\tif( lastVisibleFragment ) {\n\t\t\t\t\t\tindex = parseInt( lastVisibleFragment.getAttribute( 'data-fragment-index' ) || 0, 10 );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tindex = -1;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Apply the offset if there is one\n\t\t\t\tindex += offset;\n\n\t\t\t\tlet changedFragments = this.update( 
index, fragments );\n\n\t\t\t\tif( changedFragments.hidden.length ) {\n\t\t\t\t\tthis.Reveal.dispatchEvent({\n\t\t\t\t\t\ttype: 'fragmenthidden',\n\t\t\t\t\t\tdata: {\n\t\t\t\t\t\t\tfragment: changedFragments.hidden[0],\n\t\t\t\t\t\t\tfragments: changedFragments.hidden\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\tif( changedFragments.shown.length ) {\n\t\t\t\t\tthis.Reveal.dispatchEvent({\n\t\t\t\t\t\ttype: 'fragmentshown',\n\t\t\t\t\t\tdata: {\n\t\t\t\t\t\t\tfragment: changedFragments.shown[0],\n\t\t\t\t\t\t\tfragments: changedFragments.shown\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\tthis.Reveal.controls.update();\n\t\t\t\tthis.Reveal.progress.update();\n\n\t\t\t\tif( this.Reveal.getConfig().fragmentInURL ) {\n\t\t\t\t\tthis.Reveal.location.writeURL();\n\t\t\t\t}\n\n\t\t\t\treturn !!( changedFragments.shown.length || changedFragments.hidden.length );\n\n\t\t\t}\n\n\t\t}\n\n\t\treturn false;\n\n\t}\n\n\t/**\n\t * Navigate to the next slide fragment.\n\t *\n\t * @return {boolean} true if there was a next fragment,\n\t * false otherwise\n\t */\n\tnext() {\n\n\t\treturn this.goto( null, 1 );\n\n\t}\n\n\t/**\n\t * Navigate to the previous slide fragment.\n\t *\n\t * @return {boolean} true if there was a previous fragment,\n\t * false otherwise\n\t */\n\tprev() {\n\n\t\treturn this.goto( null, -1 );\n\n\t}\n\n}","import { SLIDES_SELECTOR } from '../utils/constants.js'\nimport { extend, queryAll, transformElement } from '../utils/util.js'\n\n/**\n * Handles all logic related to the overview mode\n * (birds-eye view of all slides).\n */\nexport default class Overview {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\tthis.active = false;\n\n\t\tthis.onSlideClicked = this.onSlideClicked.bind( this );\n\n\t}\n\n\t/**\n\t * Displays the overview of slides (quick nav) by scaling\n\t * down and arranging all slide elements.\n\t */\n\tactivate() {\n\n\t\t// Only proceed if enabled in config\n\t\tif( this.Reveal.getConfig().overview && 
!this.isActive() ) {\n\n\t\t\tthis.active = true;\n\n\t\t\tthis.Reveal.getRevealElement().classList.add( 'overview' );\n\n\t\t\t// Don't auto-slide while in overview mode\n\t\t\tthis.Reveal.cancelAutoSlide();\n\n\t\t\t// Move the backgrounds element into the slide container to\n\t\t\t// that the same scaling is applied\n\t\t\tthis.Reveal.getSlidesElement().appendChild( this.Reveal.getBackgroundsElement() );\n\n\t\t\t// Clicking on an overview slide navigates to it\n\t\t\tqueryAll( this.Reveal.getRevealElement(), SLIDES_SELECTOR ).forEach( slide => {\n\t\t\t\tif( !slide.classList.contains( 'stack' ) ) {\n\t\t\t\t\tslide.addEventListener( 'click', this.onSlideClicked, true );\n\t\t\t\t}\n\t\t\t} );\n\n\t\t\t// Calculate slide sizes\n\t\t\tconst margin = 70;\n\t\t\tconst slideSize = this.Reveal.getComputedSlideSize();\n\t\t\tthis.overviewSlideWidth = slideSize.width + margin;\n\t\t\tthis.overviewSlideHeight = slideSize.height + margin;\n\n\t\t\t// Reverse in RTL mode\n\t\t\tif( this.Reveal.getConfig().rtl ) {\n\t\t\t\tthis.overviewSlideWidth = -this.overviewSlideWidth;\n\t\t\t}\n\n\t\t\tthis.Reveal.updateSlidesVisibility();\n\n\t\t\tthis.layout();\n\t\t\tthis.update();\n\n\t\t\tthis.Reveal.layout();\n\n\t\t\tconst indices = this.Reveal.getIndices();\n\n\t\t\t// Notify observers of the overview showing\n\t\t\tthis.Reveal.dispatchEvent({\n\t\t\t\ttype: 'overviewshown',\n\t\t\t\tdata: {\n\t\t\t\t\t'indexh': indices.h,\n\t\t\t\t\t'indexv': indices.v,\n\t\t\t\t\t'currentSlide': this.Reveal.getCurrentSlide()\n\t\t\t\t}\n\t\t\t});\n\n\t\t}\n\n\t}\n\n\t/**\n\t * Uses CSS transforms to position all slides in a grid for\n\t * display inside of the overview mode.\n\t */\n\tlayout() {\n\n\t\t// Layout slides\n\t\tthis.Reveal.getHorizontalSlides().forEach( ( hslide, h ) => {\n\t\t\thslide.setAttribute( 'data-index-h', h );\n\t\t\ttransformElement( hslide, 'translate3d(' + ( h * this.overviewSlideWidth ) + 'px, 0, 0)' );\n\n\t\t\tif( hslide.classList.contains( 'stack' ) ) 
{\n\n\t\t\t\tqueryAll( hslide, 'section' ).forEach( ( vslide, v ) => {\n\t\t\t\t\tvslide.setAttribute( 'data-index-h', h );\n\t\t\t\t\tvslide.setAttribute( 'data-index-v', v );\n\n\t\t\t\t\ttransformElement( vslide, 'translate3d(0, ' + ( v * this.overviewSlideHeight ) + 'px, 0)' );\n\t\t\t\t} );\n\n\t\t\t}\n\t\t} );\n\n\t\t// Layout slide backgrounds\n\t\tArray.from( this.Reveal.getBackgroundsElement().childNodes ).forEach( ( hbackground, h ) => {\n\t\t\ttransformElement( hbackground, 'translate3d(' + ( h * this.overviewSlideWidth ) + 'px, 0, 0)' );\n\n\t\t\tqueryAll( hbackground, '.slide-background' ).forEach( ( vbackground, v ) => {\n\t\t\t\ttransformElement( vbackground, 'translate3d(0, ' + ( v * this.overviewSlideHeight ) + 'px, 0)' );\n\t\t\t} );\n\t\t} );\n\n\t}\n\n\t/**\n\t * Moves the overview viewport to the current slides.\n\t * Called each time the current slide changes.\n\t */\n\tupdate() {\n\n\t\tconst vmin = Math.min( window.innerWidth, window.innerHeight );\n\t\tconst scale = Math.max( vmin / 5, 150 ) / vmin;\n\t\tconst indices = this.Reveal.getIndices();\n\n\t\tthis.Reveal.transformSlides( {\n\t\t\toverview: [\n\t\t\t\t'scale('+ scale +')',\n\t\t\t\t'translateX('+ ( -indices.h * this.overviewSlideWidth ) +'px)',\n\t\t\t\t'translateY('+ ( -indices.v * this.overviewSlideHeight ) +'px)'\n\t\t\t].join( ' ' )\n\t\t} );\n\n\t}\n\n\t/**\n\t * Exits the slide overview and enters the currently\n\t * active slide.\n\t */\n\tdeactivate() {\n\n\t\t// Only proceed if enabled in config\n\t\tif( this.Reveal.getConfig().overview ) {\n\n\t\t\tthis.active = false;\n\n\t\t\tthis.Reveal.getRevealElement().classList.remove( 'overview' );\n\n\t\t\t// Temporarily add a class so that transitions can do different things\n\t\t\t// depending on whether they are exiting/entering overview, or just\n\t\t\t// moving from slide to slide\n\t\t\tthis.Reveal.getRevealElement().classList.add( 'overview-deactivating' );\n\n\t\t\tsetTimeout( () => 
{\n\t\t\t\tthis.Reveal.getRevealElement().classList.remove( 'overview-deactivating' );\n\t\t\t}, 1 );\n\n\t\t\t// Move the background element back out\n\t\t\tthis.Reveal.getRevealElement().appendChild( this.Reveal.getBackgroundsElement() );\n\n\t\t\t// Clean up changes made to slides\n\t\t\tqueryAll( this.Reveal.getRevealElement(), SLIDES_SELECTOR ).forEach( slide => {\n\t\t\t\ttransformElement( slide, '' );\n\n\t\t\t\tslide.removeEventListener( 'click', this.onSlideClicked, true );\n\t\t\t} );\n\n\t\t\t// Clean up changes made to backgrounds\n\t\t\tqueryAll( this.Reveal.getBackgroundsElement(), '.slide-background' ).forEach( background => {\n\t\t\t\ttransformElement( background, '' );\n\t\t\t} );\n\n\t\t\tthis.Reveal.transformSlides( { overview: '' } );\n\n\t\t\tconst indices = this.Reveal.getIndices();\n\n\t\t\tthis.Reveal.slide( indices.h, indices.v );\n\t\t\tthis.Reveal.layout();\n\t\t\tthis.Reveal.cueAutoSlide();\n\n\t\t\t// Notify observers of the overview hiding\n\t\t\tthis.Reveal.dispatchEvent({\n\t\t\t\ttype: 'overviewhidden',\n\t\t\t\tdata: {\n\t\t\t\t\t'indexh': indices.h,\n\t\t\t\t\t'indexv': indices.v,\n\t\t\t\t\t'currentSlide': this.Reveal.getCurrentSlide()\n\t\t\t\t}\n\t\t\t});\n\n\t\t}\n\t}\n\n\t/**\n\t * Toggles the slide overview mode on and off.\n\t *\n\t * @param {Boolean} [override] Flag which overrides the\n\t * toggle logic and forcibly sets the desired state. True means\n\t * overview is open, false means it's closed.\n\t */\n\ttoggle( override ) {\n\n\t\tif( typeof override === 'boolean' ) {\n\t\t\toverride ? this.activate() : this.deactivate();\n\t\t}\n\t\telse {\n\t\t\tthis.isActive() ? 
this.deactivate() : this.activate();\n\t\t}\n\n\t}\n\n\t/**\n\t * Checks if the overview is currently active.\n\t *\n\t * @return {Boolean} true if the overview is active,\n\t * false otherwise\n\t */\n\tisActive() {\n\n\t\treturn this.active;\n\n\t}\n\n\t/**\n\t * Invoked when a slide is and we're in the overview.\n\t *\n\t * @param {object} event\n\t */\n\tonSlideClicked( event ) {\n\n\t\tif( this.isActive() ) {\n\t\t\tevent.preventDefault();\n\n\t\t\tlet element = event.target;\n\n\t\t\twhile( element && !element.nodeName.match( /section/gi ) ) {\n\t\t\t\telement = element.parentNode;\n\t\t\t}\n\n\t\t\tif( element && !element.classList.contains( 'disabled' ) ) {\n\n\t\t\t\tthis.deactivate();\n\n\t\t\t\tif( element.nodeName.match( /section/gi ) ) {\n\t\t\t\t\tlet h = parseInt( element.getAttribute( 'data-index-h' ), 10 ),\n\t\t\t\t\t\tv = parseInt( element.getAttribute( 'data-index-v' ), 10 );\n\n\t\t\t\t\tthis.Reveal.slide( h, v );\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t}\n\n}","import { enterFullscreen } from '../utils/util.js'\n\n/**\n * Handles all reveal.js keyboard interactions.\n */\nexport default class Keyboard {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\t// A key:value map of keyboard keys and descriptions of\n\t\t// the actions they trigger\n\t\tthis.shortcuts = {};\n\n\t\t// Holds custom key code mappings\n\t\tthis.bindings = {};\n\n\t\tthis.onDocumentKeyDown = this.onDocumentKeyDown.bind( this );\n\t\tthis.onDocumentKeyPress = this.onDocumentKeyPress.bind( this );\n\n\t}\n\n\t/**\n\t * Called when the reveal.js config is updated.\n\t */\n\tconfigure( config, oldConfig ) {\n\n\t\tif( config.navigationMode === 'linear' ) {\n\t\t\tthis.shortcuts['→ , ↓ , SPACE , N , L , J'] = 'Next slide';\n\t\t\tthis.shortcuts['← , ↑ , P , H , K'] = 'Previous slide';\n\t\t}\n\t\telse {\n\t\t\tthis.shortcuts['N , SPACE'] = 'Next slide';\n\t\t\tthis.shortcuts['P , Shift SPACE'] = 'Previous slide';\n\t\t\tthis.shortcuts['← , H'] = 'Navigate 
left';\n\t\t\tthis.shortcuts['→ , L'] = 'Navigate right';\n\t\t\tthis.shortcuts['↑ , K'] = 'Navigate up';\n\t\t\tthis.shortcuts['↓ , J'] = 'Navigate down';\n\t\t}\n\n\t\tthis.shortcuts['Alt + ←/↑/→/↓'] = 'Navigate without fragments';\n\t\tthis.shortcuts['Shift + ←/↑/→/↓'] = 'Jump to first/last slide';\n\t\tthis.shortcuts['B , .'] = 'Pause';\n\t\tthis.shortcuts['F'] = 'Fullscreen';\n\t\tthis.shortcuts['G'] = 'Jump to slide';\n\t\tthis.shortcuts['ESC, O'] = 'Slide overview';\n\n\t}\n\n\t/**\n\t * Starts listening for keyboard events.\n\t */\n\tbind() {\n\n\t\tdocument.addEventListener( 'keydown', this.onDocumentKeyDown, false );\n\t\tdocument.addEventListener( 'keypress', this.onDocumentKeyPress, false );\n\n\t}\n\n\t/**\n\t * Stops listening for keyboard events.\n\t */\n\tunbind() {\n\n\t\tdocument.removeEventListener( 'keydown', this.onDocumentKeyDown, false );\n\t\tdocument.removeEventListener( 'keypress', this.onDocumentKeyPress, false );\n\n\t}\n\n\t/**\n\t * Add a custom key binding with optional description to\n\t * be added to the help screen.\n\t */\n\taddKeyBinding( binding, callback ) {\n\n\t\tif( typeof binding === 'object' && binding.keyCode ) {\n\t\t\tthis.bindings[binding.keyCode] = {\n\t\t\t\tcallback: callback,\n\t\t\t\tkey: binding.key,\n\t\t\t\tdescription: binding.description\n\t\t\t};\n\t\t}\n\t\telse {\n\t\t\tthis.bindings[binding] = {\n\t\t\t\tcallback: callback,\n\t\t\t\tkey: null,\n\t\t\t\tdescription: null\n\t\t\t};\n\t\t}\n\n\t}\n\n\t/**\n\t * Removes the specified custom key binding.\n\t */\n\tremoveKeyBinding( keyCode ) {\n\n\t\tdelete this.bindings[keyCode];\n\n\t}\n\n\t/**\n\t * Programmatically triggers a keyboard event\n\t *\n\t * @param {int} keyCode\n\t */\n\ttriggerKey( keyCode ) {\n\n\t\tthis.onDocumentKeyDown( { keyCode } );\n\n\t}\n\n\t/**\n\t * Registers a new shortcut to include in the help overlay\n\t *\n\t * @param {String} key\n\t * @param {String} value\n\t */\n\tregisterKeyboardShortcut( key, value ) 
{\n\n\t\tthis.shortcuts[key] = value;\n\n\t}\n\n\tgetShortcuts() {\n\n\t\treturn this.shortcuts;\n\n\t}\n\n\tgetBindings() {\n\n\t\treturn this.bindings;\n\n\t}\n\n\t/**\n\t * Handler for the document level 'keypress' event.\n\t *\n\t * @param {object} event\n\t */\n\tonDocumentKeyPress( event ) {\n\n\t\t// Check if the pressed key is question mark\n\t\tif( event.shiftKey && event.charCode === 63 ) {\n\t\t\tthis.Reveal.toggleHelp();\n\t\t}\n\n\t}\n\n\t/**\n\t * Handler for the document level 'keydown' event.\n\t *\n\t * @param {object} event\n\t */\n\tonDocumentKeyDown( event ) {\n\n\t\tlet config = this.Reveal.getConfig();\n\n\t\t// If there's a condition specified and it returns false,\n\t\t// ignore this event\n\t\tif( typeof config.keyboardCondition === 'function' && config.keyboardCondition(event) === false ) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// If keyboardCondition is set, only capture keyboard events\n\t\t// for embedded decks when they are focused\n\t\tif( config.keyboardCondition === 'focused' && !this.Reveal.isFocused() ) {\n\t\t\treturn true;\n\t\t}\n\n\t\t// Shorthand\n\t\tlet keyCode = event.keyCode;\n\n\t\t// Remember if auto-sliding was paused so we can toggle it\n\t\tlet autoSlideWasPaused = !this.Reveal.isAutoSliding();\n\n\t\tthis.Reveal.onUserInput( event );\n\n\t\t// Is there a focused element that could be using the keyboard?\n\t\tlet activeElementIsCE = document.activeElement && document.activeElement.isContentEditable === true;\n\t\tlet activeElementIsInput = document.activeElement && document.activeElement.tagName && /input|textarea/i.test( document.activeElement.tagName );\n\t\tlet activeElementIsNotes = document.activeElement && document.activeElement.className && /speaker-notes/i.test( document.activeElement.className);\n\n\t\t// Whitelist certain modifiers for slide navigation shortcuts\n\t\tlet isNavigationKey = [32, 37, 38, 39, 40, 78, 80].indexOf( event.keyCode ) !== -1;\n\n\t\t// Prevent all other events when a modifier is 
pressed\n\t\tlet unusedModifier = \t!( isNavigationKey && event.shiftKey || event.altKey ) &&\n\t\t\t\t\t\t\t\t( event.shiftKey || event.altKey || event.ctrlKey || event.metaKey );\n\n\t\t// Disregard the event if there's a focused element or a\n\t\t// keyboard modifier key is present\n\t\tif( activeElementIsCE || activeElementIsInput || activeElementIsNotes || unusedModifier ) return;\n\n\t\t// While paused only allow resume keyboard events; 'b', 'v', '.'\n\t\tlet resumeKeyCodes = [66,86,190,191];\n\t\tlet key;\n\n\t\t// Custom key bindings for togglePause should be able to resume\n\t\tif( typeof config.keyboard === 'object' ) {\n\t\t\tfor( key in config.keyboard ) {\n\t\t\t\tif( config.keyboard[key] === 'togglePause' ) {\n\t\t\t\t\tresumeKeyCodes.push( parseInt( key, 10 ) );\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif( this.Reveal.isPaused() && resumeKeyCodes.indexOf( keyCode ) === -1 ) {\n\t\t\treturn false;\n\t\t}\n\n\t\t// Use linear navigation if we're configured to OR if\n\t\t// the presentation is one-dimensional\n\t\tlet useLinearMode = config.navigationMode === 'linear' || !this.Reveal.hasHorizontalSlides() || !this.Reveal.hasVerticalSlides();\n\n\t\tlet triggered = false;\n\n\t\t// 1. User defined key bindings\n\t\tif( typeof config.keyboard === 'object' ) {\n\n\t\t\tfor( key in config.keyboard ) {\n\n\t\t\t\t// Check if this binding matches the pressed key\n\t\t\t\tif( parseInt( key, 10 ) === keyCode ) {\n\n\t\t\t\t\tlet value = config.keyboard[ key ];\n\n\t\t\t\t\t// Callback function\n\t\t\t\t\tif( typeof value === 'function' ) {\n\t\t\t\t\t\tvalue.apply( null, [ event ] );\n\t\t\t\t\t}\n\t\t\t\t\t// String shortcuts to reveal.js API\n\t\t\t\t\telse if( typeof value === 'string' && typeof this.Reveal[ value ] === 'function' ) {\n\t\t\t\t\t\tthis.Reveal[ value ].call();\n\t\t\t\t\t}\n\n\t\t\t\t\ttriggered = true;\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t\t// 2. 
Registered custom key bindings\n\t\tif( triggered === false ) {\n\n\t\t\tfor( key in this.bindings ) {\n\n\t\t\t\t// Check if this binding matches the pressed key\n\t\t\t\tif( parseInt( key, 10 ) === keyCode ) {\n\n\t\t\t\t\tlet action = this.bindings[ key ].callback;\n\n\t\t\t\t\t// Callback function\n\t\t\t\t\tif( typeof action === 'function' ) {\n\t\t\t\t\t\taction.apply( null, [ event ] );\n\t\t\t\t\t}\n\t\t\t\t\t// String shortcuts to reveal.js API\n\t\t\t\t\telse if( typeof action === 'string' && typeof this.Reveal[ action ] === 'function' ) {\n\t\t\t\t\t\tthis.Reveal[ action ].call();\n\t\t\t\t\t}\n\n\t\t\t\t\ttriggered = true;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// 3. System defined key bindings\n\t\tif( triggered === false ) {\n\n\t\t\t// Assume true and try to prove false\n\t\t\ttriggered = true;\n\n\t\t\t// P, PAGE UP\n\t\t\tif( keyCode === 80 || keyCode === 33 ) {\n\t\t\t\tthis.Reveal.prev({skipFragments: event.altKey});\n\t\t\t}\n\t\t\t// N, PAGE DOWN\n\t\t\telse if( keyCode === 78 || keyCode === 34 ) {\n\t\t\t\tthis.Reveal.next({skipFragments: event.altKey});\n\t\t\t}\n\t\t\t// H, LEFT\n\t\t\telse if( keyCode === 72 || keyCode === 37 ) {\n\t\t\t\tif( event.shiftKey ) {\n\t\t\t\t\tthis.Reveal.slide( 0 );\n\t\t\t\t}\n\t\t\t\telse if( !this.Reveal.overview.isActive() && useLinearMode ) {\n\t\t\t\t\tthis.Reveal.prev({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis.Reveal.left({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t}\n\t\t\t// L, RIGHT\n\t\t\telse if( keyCode === 76 || keyCode === 39 ) {\n\t\t\t\tif( event.shiftKey ) {\n\t\t\t\t\tthis.Reveal.slide( this.Reveal.getHorizontalSlides().length - 1 );\n\t\t\t\t}\n\t\t\t\telse if( !this.Reveal.overview.isActive() && useLinearMode ) {\n\t\t\t\t\tthis.Reveal.next({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis.Reveal.right({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t}\n\t\t\t// K, UP\n\t\t\telse if( keyCode === 75 || keyCode === 38 ) {\n\t\t\t\tif( 
event.shiftKey ) {\n\t\t\t\t\tthis.Reveal.slide( undefined, 0 );\n\t\t\t\t}\n\t\t\t\telse if( !this.Reveal.overview.isActive() && useLinearMode ) {\n\t\t\t\t\tthis.Reveal.prev({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis.Reveal.up({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t}\n\t\t\t// J, DOWN\n\t\t\telse if( keyCode === 74 || keyCode === 40 ) {\n\t\t\t\tif( event.shiftKey ) {\n\t\t\t\t\tthis.Reveal.slide( undefined, Number.MAX_VALUE );\n\t\t\t\t}\n\t\t\t\telse if( !this.Reveal.overview.isActive() && useLinearMode ) {\n\t\t\t\t\tthis.Reveal.next({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis.Reveal.down({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t}\n\t\t\t// HOME\n\t\t\telse if( keyCode === 36 ) {\n\t\t\t\tthis.Reveal.slide( 0 );\n\t\t\t}\n\t\t\t// END\n\t\t\telse if( keyCode === 35 ) {\n\t\t\t\tthis.Reveal.slide( this.Reveal.getHorizontalSlides().length - 1 );\n\t\t\t}\n\t\t\t// SPACE\n\t\t\telse if( keyCode === 32 ) {\n\t\t\t\tif( this.Reveal.overview.isActive() ) {\n\t\t\t\t\tthis.Reveal.overview.deactivate();\n\t\t\t\t}\n\t\t\t\tif( event.shiftKey ) {\n\t\t\t\t\tthis.Reveal.prev({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis.Reveal.next({skipFragments: event.altKey});\n\t\t\t\t}\n\t\t\t}\n\t\t\t// TWO-SPOT, SEMICOLON, B, V, PERIOD, LOGITECH PRESENTER TOOLS \"BLACK SCREEN\" BUTTON\n\t\t\telse if( keyCode === 58 || keyCode === 59 || keyCode === 66 || keyCode === 86 || keyCode === 190 || keyCode === 191 ) {\n\t\t\t\tthis.Reveal.togglePause();\n\t\t\t}\n\t\t\t// F\n\t\t\telse if( keyCode === 70 ) {\n\t\t\t\tenterFullscreen( config.embedded ? 
this.Reveal.getViewportElement() : document.documentElement );\n\t\t\t}\n\t\t\t// A\n\t\t\telse if( keyCode === 65 ) {\n\t\t\t\tif ( config.autoSlideStoppable ) {\n\t\t\t\t\tthis.Reveal.toggleAutoSlide( autoSlideWasPaused );\n\t\t\t\t}\n\t\t\t}\n\t\t\t// G\n\t\t\telse if( keyCode === 71 ) {\n\t\t\t\tif ( config.jumpToSlide ) {\n\t\t\t\t\tthis.Reveal.toggleJumpToSlide();\n\t\t\t\t}\n\t\t\t}\n\t\t\telse {\n\t\t\t\ttriggered = false;\n\t\t\t}\n\n\t\t}\n\n\t\t// If the input resulted in a triggered action we should prevent\n\t\t// the browsers default behavior\n\t\tif( triggered ) {\n\t\t\tevent.preventDefault && event.preventDefault();\n\t\t}\n\t\t// ESC or O key\n\t\telse if( keyCode === 27 || keyCode === 79 ) {\n\t\t\tif( this.Reveal.closeOverlay() === false ) {\n\t\t\t\tthis.Reveal.overview.toggle();\n\t\t\t}\n\n\t\t\tevent.preventDefault && event.preventDefault();\n\t\t}\n\n\t\t// If auto-sliding is enabled we need to cue up\n\t\t// another timeout\n\t\tthis.Reveal.cueAutoSlide();\n\n\t}\n\n}","/**\n * Reads and writes the URL based on reveal.js' current state.\n */\nexport default class Location {\n\n\t// The minimum number of milliseconds that must pass between\n\t// calls to history.replaceState\n\tMAX_REPLACE_STATE_FREQUENCY = 1000\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\t// Delays updates to the URL due to a Chrome thumbnailer bug\n\t\tthis.writeURLTimeout = 0;\n\n\t\tthis.replaceStateTimestamp = 0;\n\n\t\tthis.onWindowHashChange = this.onWindowHashChange.bind( this );\n\n\t}\n\n\tbind() {\n\n\t\twindow.addEventListener( 'hashchange', this.onWindowHashChange, false );\n\n\t}\n\n\tunbind() {\n\n\t\twindow.removeEventListener( 'hashchange', this.onWindowHashChange, false );\n\n\t}\n\n\t/**\n\t * Returns the slide indices for the given hash link.\n\t *\n\t * @param {string} [hash] the hash string that we want to\n\t * find the indices for\n\t *\n\t * @returns slide indices or null\n\t */\n\tgetIndicesFromHash( hash=window.location.hash, 
options={} ) {\n\n\t\t// Attempt to parse the hash as either an index or name\n\t\tlet name = hash.replace( /^#\\/?/, '' );\n\t\tlet bits = name.split( '/' );\n\n\t\t// If the first bit is not fully numeric and there is a name we\n\t\t// can assume that this is a named link\n\t\tif( !/^[0-9]*$/.test( bits[0] ) && name.length ) {\n\t\t\tlet element;\n\n\t\t\tlet f;\n\n\t\t\t// Parse named links with fragments (#/named-link/2)\n\t\t\tif( /\\/[-\\d]+$/g.test( name ) ) {\n\t\t\t\tf = parseInt( name.split( '/' ).pop(), 10 );\n\t\t\t\tf = isNaN(f) ? undefined : f;\n\t\t\t\tname = name.split( '/' ).shift();\n\t\t\t}\n\n\t\t\t// Ensure the named link is a valid HTML ID attribute\n\t\t\ttry {\n\t\t\t\telement = document.getElementById( decodeURIComponent( name ) );\n\t\t\t}\n\t\t\tcatch ( error ) { }\n\n\t\t\tif( element ) {\n\t\t\t\treturn { ...this.Reveal.getIndices( element ), f };\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tconst config = this.Reveal.getConfig();\n\t\t\tlet hashIndexBase = config.hashOneBasedIndex || options.oneBasedIndex ? 
1 : 0;\n\n\t\t\t// Read the index components of the hash\n\t\t\tlet h = ( parseInt( bits[0], 10 ) - hashIndexBase ) || 0,\n\t\t\t\tv = ( parseInt( bits[1], 10 ) - hashIndexBase ) || 0,\n\t\t\t\tf;\n\n\t\t\tif( config.fragmentInURL ) {\n\t\t\t\tf = parseInt( bits[2], 10 );\n\t\t\t\tif( isNaN( f ) ) {\n\t\t\t\t\tf = undefined;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn { h, v, f };\n\t\t}\n\n\t\t// The hash couldn't be parsed or no matching named link was found\n\t\treturn null\n\n\t}\n\n\t/**\n\t * Reads the current URL (hash) and navigates accordingly.\n\t */\n\treadURL() {\n\n\t\tconst currentIndices = this.Reveal.getIndices();\n\t\tconst newIndices = this.getIndicesFromHash();\n\n\t\tif( newIndices ) {\n\t\t\tif( ( newIndices.h !== currentIndices.h || newIndices.v !== currentIndices.v || newIndices.f !== undefined ) ) {\n\t\t\t\t\tthis.Reveal.slide( newIndices.h, newIndices.v, newIndices.f );\n\t\t\t}\n\t\t}\n\t\t// If no new indices are available, we're trying to navigate to\n\t\t// a slide hash that does not exist\n\t\telse {\n\t\t\tthis.Reveal.slide( currentIndices.h || 0, currentIndices.v || 0 );\n\t\t}\n\n\t}\n\n\t/**\n\t * Updates the page URL (hash) to reflect the current\n\t * state.\n\t *\n\t * @param {number} delay The time in ms to wait before\n\t * writing the hash\n\t */\n\twriteURL( delay ) {\n\n\t\tlet config = this.Reveal.getConfig();\n\t\tlet currentSlide = this.Reveal.getCurrentSlide();\n\n\t\t// Make sure there's never more than one timeout running\n\t\tclearTimeout( this.writeURLTimeout );\n\n\t\t// If a delay is specified, timeout this call\n\t\tif( typeof delay === 'number' ) {\n\t\t\tthis.writeURLTimeout = setTimeout( this.writeURL, delay );\n\t\t}\n\t\telse if( currentSlide ) {\n\n\t\t\tlet hash = this.getHash();\n\n\t\t\t// If we're configured to push to history OR the history\n\t\t\t// API is not available.\n\t\t\tif( config.history ) {\n\t\t\t\twindow.location.hash = hash;\n\t\t\t}\n\t\t\t// If we're configured to reflect the current slide in 
the\n\t\t\t// URL without pushing to history.\n\t\t\telse if( config.hash ) {\n\t\t\t\t// If the hash is empty, don't add it to the URL\n\t\t\t\tif( hash === '/' ) {\n\t\t\t\t\tthis.debouncedReplaceState( window.location.pathname + window.location.search );\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tthis.debouncedReplaceState( '#' + hash );\n\t\t\t\t}\n\t\t\t}\n\t\t\t// UPDATE: The below nuking of all hash changes breaks\n\t\t\t// anchors on pages where reveal.js is running. Removed\n\t\t\t// in 4.0. Why was it here in the first place? ¯\\_(ツ)_/¯\n\t\t\t//\n\t\t\t// If history and hash are both disabled, a hash may still\n\t\t\t// be added to the URL by clicking on a href with a hash\n\t\t\t// target. Counter this by always removing the hash.\n\t\t\t// else {\n\t\t\t// \twindow.history.replaceState( null, null, window.location.pathname + window.location.search );\n\t\t\t// }\n\n\t\t}\n\n\t}\n\n\treplaceState( url ) {\n\n\t\twindow.history.replaceState( null, null, url );\n\t\tthis.replaceStateTimestamp = Date.now();\n\n\t}\n\n\tdebouncedReplaceState( url ) {\n\n\t\tclearTimeout( this.replaceStateTimeout );\n\n\t\tif( Date.now() - this.replaceStateTimestamp > this.MAX_REPLACE_STATE_FREQUENCY ) {\n\t\t\tthis.replaceState( url );\n\t\t}\n\t\telse {\n\t\t\tthis.replaceStateTimeout = setTimeout( () => this.replaceState( url ), this.MAX_REPLACE_STATE_FREQUENCY );\n\t\t}\n\n\t}\n\n\t/**\n\t * Return a hash URL that will resolve to the given slide location.\n\t *\n\t * @param {HTMLElement} [slide=currentSlide] The slide to link to\n\t */\n\tgetHash( slide ) {\n\n\t\tlet url = '/';\n\n\t\t// Attempt to create a named link based on the slide's ID\n\t\tlet s = slide || this.Reveal.getCurrentSlide();\n\t\tlet id = s ? 
s.getAttribute( 'id' ) : null;\n\t\tif( id ) {\n\t\t\tid = encodeURIComponent( id );\n\t\t}\n\n\t\tlet index = this.Reveal.getIndices( slide );\n\t\tif( !this.Reveal.getConfig().fragmentInURL ) {\n\t\t\tindex.f = undefined;\n\t\t}\n\n\t\t// If the current slide has an ID, use that as a named link,\n\t\t// but we don't support named links with a fragment index\n\t\tif( typeof id === 'string' && id.length ) {\n\t\t\turl = '/' + id;\n\n\t\t\t// If there is also a fragment, append that at the end\n\t\t\t// of the named link, like: #/named-link/2\n\t\t\tif( index.f >= 0 ) url += '/' + index.f;\n\t\t}\n\t\t// Otherwise use the /h/v index\n\t\telse {\n\t\t\tlet hashIndexBase = this.Reveal.getConfig().hashOneBasedIndex ? 1 : 0;\n\t\t\tif( index.h > 0 || index.v > 0 || index.f >= 0 ) url += index.h + hashIndexBase;\n\t\t\tif( index.v > 0 || index.f >= 0 ) url += '/' + (index.v + hashIndexBase );\n\t\t\tif( index.f >= 0 ) url += '/' + index.f;\n\t\t}\n\n\t\treturn url;\n\n\t}\n\n\t/**\n\t * Handler for the window level 'hashchange' event.\n\t *\n\t * @param {object} [event]\n\t */\n\tonWindowHashChange( event ) {\n\n\t\tthis.readURL();\n\n\t}\n\n}","import { queryAll } from '../utils/util.js'\nimport { isAndroid } from '../utils/device.js'\n\n/**\n * Manages our presentation controls. 
This includes both\n * the built-in control arrows as well as event monitoring\n * of any elements within the presentation with either of the\n * following helper classes:\n * - .navigate-up\n * - .navigate-right\n * - .navigate-down\n * - .navigate-left\n * - .navigate-next\n * - .navigate-prev\n */\nexport default class Controls {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\tthis.onNavigateLeftClicked = this.onNavigateLeftClicked.bind( this );\n\t\tthis.onNavigateRightClicked = this.onNavigateRightClicked.bind( this );\n\t\tthis.onNavigateUpClicked = this.onNavigateUpClicked.bind( this );\n\t\tthis.onNavigateDownClicked = this.onNavigateDownClicked.bind( this );\n\t\tthis.onNavigatePrevClicked = this.onNavigatePrevClicked.bind( this );\n\t\tthis.onNavigateNextClicked = this.onNavigateNextClicked.bind( this );\n\n\t}\n\n\trender() {\n\n\t\tconst rtl = this.Reveal.getConfig().rtl;\n\t\tconst revealElement = this.Reveal.getRevealElement();\n\n\t\tthis.element = document.createElement( 'aside' );\n\t\tthis.element.className = 'controls';\n\t\tthis.element.innerHTML =\n\t\t\t`\n\t\t\t\n\t\t\t\n\t\t\t`;\n\n\t\tthis.Reveal.getRevealElement().appendChild( this.element );\n\n\t\t// There can be multiple instances of controls throughout the page\n\t\tthis.controlsLeft = queryAll( revealElement, '.navigate-left' );\n\t\tthis.controlsRight = queryAll( revealElement, '.navigate-right' );\n\t\tthis.controlsUp = queryAll( revealElement, '.navigate-up' );\n\t\tthis.controlsDown = queryAll( revealElement, '.navigate-down' );\n\t\tthis.controlsPrev = queryAll( revealElement, '.navigate-prev' );\n\t\tthis.controlsNext = queryAll( revealElement, '.navigate-next' );\n\n\t\t// The left, right and down arrows in the standard reveal.js controls\n\t\tthis.controlsRightArrow = this.element.querySelector( '.navigate-right' );\n\t\tthis.controlsLeftArrow = this.element.querySelector( '.navigate-left' );\n\t\tthis.controlsDownArrow = this.element.querySelector( 
'.navigate-down' );\n\n\t}\n\n\t/**\n\t * Called when the reveal.js config is updated.\n\t */\n\tconfigure( config, oldConfig ) {\n\n\t\tthis.element.style.display = config.controls ? 'block' : 'none';\n\n\t\tthis.element.setAttribute( 'data-controls-layout', config.controlsLayout );\n\t\tthis.element.setAttribute( 'data-controls-back-arrows', config.controlsBackArrows );\n\n\t}\n\n\tbind() {\n\n\t\t// Listen to both touch and click events, in case the device\n\t\t// supports both\n\t\tlet pointerEvents = [ 'touchstart', 'click' ];\n\n\t\t// Only support touch for Android, fixes double navigations in\n\t\t// stock browser\n\t\tif( isAndroid ) {\n\t\t\tpointerEvents = [ 'touchstart' ];\n\t\t}\n\n\t\tpointerEvents.forEach( eventName => {\n\t\t\tthis.controlsLeft.forEach( el => el.addEventListener( eventName, this.onNavigateLeftClicked, false ) );\n\t\t\tthis.controlsRight.forEach( el => el.addEventListener( eventName, this.onNavigateRightClicked, false ) );\n\t\t\tthis.controlsUp.forEach( el => el.addEventListener( eventName, this.onNavigateUpClicked, false ) );\n\t\t\tthis.controlsDown.forEach( el => el.addEventListener( eventName, this.onNavigateDownClicked, false ) );\n\t\t\tthis.controlsPrev.forEach( el => el.addEventListener( eventName, this.onNavigatePrevClicked, false ) );\n\t\t\tthis.controlsNext.forEach( el => el.addEventListener( eventName, this.onNavigateNextClicked, false ) );\n\t\t} );\n\n\t}\n\n\tunbind() {\n\n\t\t[ 'touchstart', 'click' ].forEach( eventName => {\n\t\t\tthis.controlsLeft.forEach( el => el.removeEventListener( eventName, this.onNavigateLeftClicked, false ) );\n\t\t\tthis.controlsRight.forEach( el => el.removeEventListener( eventName, this.onNavigateRightClicked, false ) );\n\t\t\tthis.controlsUp.forEach( el => el.removeEventListener( eventName, this.onNavigateUpClicked, false ) );\n\t\t\tthis.controlsDown.forEach( el => el.removeEventListener( eventName, this.onNavigateDownClicked, false ) );\n\t\t\tthis.controlsPrev.forEach( el => 
el.removeEventListener( eventName, this.onNavigatePrevClicked, false ) );\n\t\t\tthis.controlsNext.forEach( el => el.removeEventListener( eventName, this.onNavigateNextClicked, false ) );\n\t\t} );\n\n\t}\n\n\t/**\n\t * Updates the state of all control/navigation arrows.\n\t */\n\tupdate() {\n\n\t\tlet routes = this.Reveal.availableRoutes();\n\n\t\t// Remove the 'enabled' class from all directions\n\t\t[...this.controlsLeft, ...this.controlsRight, ...this.controlsUp, ...this.controlsDown, ...this.controlsPrev, ...this.controlsNext].forEach( node => {\n\t\t\tnode.classList.remove( 'enabled', 'fragmented' );\n\n\t\t\t// Set 'disabled' attribute on all directions\n\t\t\tnode.setAttribute( 'disabled', 'disabled' );\n\t\t} );\n\n\t\t// Add the 'enabled' class to the available routes; remove 'disabled' attribute to enable buttons\n\t\tif( routes.left ) this.controlsLeft.forEach( el => { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\tif( routes.right ) this.controlsRight.forEach( el => { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\tif( routes.up ) this.controlsUp.forEach( el => { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\tif( routes.down ) this.controlsDown.forEach( el => { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\n\t\t// Prev/next buttons\n\t\tif( routes.left || routes.up ) this.controlsPrev.forEach( el => { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\tif( routes.right || routes.down ) this.controlsNext.forEach( el => { el.classList.add( 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\n\t\t// Highlight fragment directions\n\t\tlet currentSlide = this.Reveal.getCurrentSlide();\n\t\tif( currentSlide ) {\n\n\t\t\tlet fragmentsRoutes = this.Reveal.fragments.availableRoutes();\n\n\t\t\t// Always apply fragment decorator to prev/next buttons\n\t\t\tif( fragmentsRoutes.prev ) this.controlsPrev.forEach( el => { 
el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\t\tif( fragmentsRoutes.next ) this.controlsNext.forEach( el => { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\n\t\t\t// Apply fragment decorators to directional buttons based on\n\t\t\t// what slide axis they are in\n\t\t\tif( this.Reveal.isVerticalSlide( currentSlide ) ) {\n\t\t\t\tif( fragmentsRoutes.prev ) this.controlsUp.forEach( el => { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\t\t\tif( fragmentsRoutes.next ) this.controlsDown.forEach( el => { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tif( fragmentsRoutes.prev ) this.controlsLeft.forEach( el => { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\t\t\tif( fragmentsRoutes.next ) this.controlsRight.forEach( el => { el.classList.add( 'fragmented', 'enabled' ); el.removeAttribute( 'disabled' ); } );\n\t\t\t}\n\n\t\t}\n\n\t\tif( this.Reveal.getConfig().controlsTutorial ) {\n\n\t\t\tlet indices = this.Reveal.getIndices();\n\n\t\t\t// Highlight control arrows with an animation to ensure\n\t\t\t// that the viewer knows how to navigate\n\t\t\tif( !this.Reveal.hasNavigatedVertically() && routes.down ) {\n\t\t\t\tthis.controlsDownArrow.classList.add( 'highlight' );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tthis.controlsDownArrow.classList.remove( 'highlight' );\n\n\t\t\t\tif( this.Reveal.getConfig().rtl ) {\n\n\t\t\t\t\tif( !this.Reveal.hasNavigatedHorizontally() && routes.left && indices.v === 0 ) {\n\t\t\t\t\t\tthis.controlsLeftArrow.classList.add( 'highlight' );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthis.controlsLeftArrow.classList.remove( 'highlight' );\n\t\t\t\t\t}\n\n\t\t\t\t} else {\n\n\t\t\t\t\tif( !this.Reveal.hasNavigatedHorizontally() && routes.right && indices.v === 0 ) {\n\t\t\t\t\t\tthis.controlsRightArrow.classList.add( 
'highlight' );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthis.controlsRightArrow.classList.remove( 'highlight' );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tdestroy() {\n\n\t\tthis.unbind();\n\t\tthis.element.remove();\n\n\t}\n\n\t/**\n\t * Event handlers for navigation control buttons.\n\t */\n\tonNavigateLeftClicked( event ) {\n\n\t\tevent.preventDefault();\n\t\tthis.Reveal.onUserInput();\n\n\t\tif( this.Reveal.getConfig().navigationMode === 'linear' ) {\n\t\t\tthis.Reveal.prev();\n\t\t}\n\t\telse {\n\t\t\tthis.Reveal.left();\n\t\t}\n\n\t}\n\n\tonNavigateRightClicked( event ) {\n\n\t\tevent.preventDefault();\n\t\tthis.Reveal.onUserInput();\n\n\t\tif( this.Reveal.getConfig().navigationMode === 'linear' ) {\n\t\t\tthis.Reveal.next();\n\t\t}\n\t\telse {\n\t\t\tthis.Reveal.right();\n\t\t}\n\n\t}\n\n\tonNavigateUpClicked( event ) {\n\n\t\tevent.preventDefault();\n\t\tthis.Reveal.onUserInput();\n\n\t\tthis.Reveal.up();\n\n\t}\n\n\tonNavigateDownClicked( event ) {\n\n\t\tevent.preventDefault();\n\t\tthis.Reveal.onUserInput();\n\n\t\tthis.Reveal.down();\n\n\t}\n\n\tonNavigatePrevClicked( event ) {\n\n\t\tevent.preventDefault();\n\t\tthis.Reveal.onUserInput();\n\n\t\tthis.Reveal.prev();\n\n\t}\n\n\tonNavigateNextClicked( event ) {\n\n\t\tevent.preventDefault();\n\t\tthis.Reveal.onUserInput();\n\n\t\tthis.Reveal.next();\n\n\t}\n\n\n}","/**\n * Creates a visual progress bar for the presentation.\n */\nexport default class Progress {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\tthis.onProgressClicked = this.onProgressClicked.bind( this );\n\n\t}\n\n\trender() {\n\n\t\tthis.element = document.createElement( 'div' );\n\t\tthis.element.className = 'progress';\n\t\tthis.Reveal.getRevealElement().appendChild( this.element );\n\n\t\tthis.bar = document.createElement( 'span' );\n\t\tthis.element.appendChild( this.bar );\n\n\t}\n\n\t/**\n\t * Called when the reveal.js config is updated.\n\t */\n\tconfigure( config, oldConfig ) 
{\n\n\t\tthis.element.style.display = config.progress ? 'block' : 'none';\n\n\t}\n\n\tbind() {\n\n\t\tif( this.Reveal.getConfig().progress && this.element ) {\n\t\t\tthis.element.addEventListener( 'click', this.onProgressClicked, false );\n\t\t}\n\n\t}\n\n\tunbind() {\n\n\t\tif ( this.Reveal.getConfig().progress && this.element ) {\n\t\t\tthis.element.removeEventListener( 'click', this.onProgressClicked, false );\n\t\t}\n\n\t}\n\n\t/**\n\t * Updates the progress bar to reflect the current slide.\n\t */\n\tupdate() {\n\n\t\t// Update progress if enabled\n\t\tif( this.Reveal.getConfig().progress && this.bar ) {\n\n\t\t\tlet scale = this.Reveal.getProgress();\n\n\t\t\t// Don't fill the progress bar if there's only one slide\n\t\t\tif( this.Reveal.getTotalSlides() < 2 ) {\n\t\t\t\tscale = 0;\n\t\t\t}\n\n\t\t\tthis.bar.style.transform = 'scaleX('+ scale +')';\n\n\t\t}\n\n\t}\n\n\tgetMaxWidth() {\n\n\t\treturn this.Reveal.getRevealElement().offsetWidth;\n\n\t}\n\n\t/**\n\t * Clicking on the progress bar results in a navigation to the\n\t * closest approximate horizontal slide using this equation:\n\t *\n\t * ( clickX / presentationWidth ) * numberOfSlides\n\t *\n\t * @param {object} event\n\t */\n\tonProgressClicked( event ) {\n\n\t\tthis.Reveal.onUserInput( event );\n\n\t\tevent.preventDefault();\n\n\t\tlet slides = this.Reveal.getSlides();\n\t\tlet slidesTotal = slides.length;\n\t\tlet slideIndex = Math.floor( ( event.clientX / this.getMaxWidth() ) * slidesTotal );\n\n\t\tif( this.Reveal.getConfig().rtl ) {\n\t\t\tslideIndex = slidesTotal - slideIndex;\n\t\t}\n\n\t\tlet targetIndices = this.Reveal.getIndices(slides[slideIndex]);\n\t\tthis.Reveal.slide( targetIndices.h, targetIndices.v );\n\n\t}\n\n\tdestroy() {\n\n\t\tthis.element.remove();\n\n\t}\n\n}","/**\n * Handles hiding of the pointer/cursor when inactive.\n */\nexport default class Pointer {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\t// Throttles mouse wheel 
navigation\n\t\tthis.lastMouseWheelStep = 0;\n\n\t\t// Is the mouse pointer currently hidden from view\n\t\tthis.cursorHidden = false;\n\n\t\t// Timeout used to determine when the cursor is inactive\n\t\tthis.cursorInactiveTimeout = 0;\n\n\t\tthis.onDocumentCursorActive = this.onDocumentCursorActive.bind( this );\n\t\tthis.onDocumentMouseScroll = this.onDocumentMouseScroll.bind( this );\n\n\t}\n\n\t/**\n\t * Called when the reveal.js config is updated.\n\t */\n\tconfigure( config, oldConfig ) {\n\n\t\tif( config.mouseWheel ) {\n\t\t\tdocument.addEventListener( 'DOMMouseScroll', this.onDocumentMouseScroll, false ); // FF\n\t\t\tdocument.addEventListener( 'mousewheel', this.onDocumentMouseScroll, false );\n\t\t}\n\t\telse {\n\t\t\tdocument.removeEventListener( 'DOMMouseScroll', this.onDocumentMouseScroll, false ); // FF\n\t\t\tdocument.removeEventListener( 'mousewheel', this.onDocumentMouseScroll, false );\n\t\t}\n\n\t\t// Auto-hide the mouse pointer when its inactive\n\t\tif( config.hideInactiveCursor ) {\n\t\t\tdocument.addEventListener( 'mousemove', this.onDocumentCursorActive, false );\n\t\t\tdocument.addEventListener( 'mousedown', this.onDocumentCursorActive, false );\n\t\t}\n\t\telse {\n\t\t\tthis.showCursor();\n\n\t\t\tdocument.removeEventListener( 'mousemove', this.onDocumentCursorActive, false );\n\t\t\tdocument.removeEventListener( 'mousedown', this.onDocumentCursorActive, false );\n\t\t}\n\n\t}\n\n\t/**\n\t * Shows the mouse pointer after it has been hidden with\n\t * #hideCursor.\n\t */\n\tshowCursor() {\n\n\t\tif( this.cursorHidden ) {\n\t\t\tthis.cursorHidden = false;\n\t\t\tthis.Reveal.getRevealElement().style.cursor = '';\n\t\t}\n\n\t}\n\n\t/**\n\t * Hides the mouse pointer when it's on top of the .reveal\n\t * container.\n\t */\n\thideCursor() {\n\n\t\tif( this.cursorHidden === false ) {\n\t\t\tthis.cursorHidden = true;\n\t\t\tthis.Reveal.getRevealElement().style.cursor = 'none';\n\t\t}\n\n\t}\n\n\tdestroy() 
{\n\n\t\tthis.showCursor();\n\n\t\tdocument.removeEventListener( 'DOMMouseScroll', this.onDocumentMouseScroll, false );\n\t\tdocument.removeEventListener( 'mousewheel', this.onDocumentMouseScroll, false );\n\t\tdocument.removeEventListener( 'mousemove', this.onDocumentCursorActive, false );\n\t\tdocument.removeEventListener( 'mousedown', this.onDocumentCursorActive, false );\n\n\t}\n\n\t/**\n\t * Called whenever there is mouse input at the document level\n\t * to determine if the cursor is active or not.\n\t *\n\t * @param {object} event\n\t */\n\tonDocumentCursorActive( event ) {\n\n\t\tthis.showCursor();\n\n\t\tclearTimeout( this.cursorInactiveTimeout );\n\n\t\tthis.cursorInactiveTimeout = setTimeout( this.hideCursor.bind( this ), this.Reveal.getConfig().hideCursorTime );\n\n\t}\n\n\t/**\n\t * Handles mouse wheel scrolling, throttled to avoid skipping\n\t * multiple slides.\n\t *\n\t * @param {object} event\n\t */\n\tonDocumentMouseScroll( event ) {\n\n\t\tif( Date.now() - this.lastMouseWheelStep > 1000 ) {\n\n\t\t\tthis.lastMouseWheelStep = Date.now();\n\n\t\t\tlet delta = event.detail || -event.wheelDelta;\n\t\t\tif( delta > 0 ) {\n\t\t\t\tthis.Reveal.next();\n\t\t\t}\n\t\t\telse if( delta < 0 ) {\n\t\t\t\tthis.Reveal.prev();\n\t\t\t}\n\n\t\t}\n\n\t}\n\n}","/**\n * Loads a JavaScript file from the given URL and executes it.\n *\n * @param {string} url Address of the .js file to load\n * @param {function} callback Method to invoke when the script\n * has loaded and executed\n */\nexport const loadScript = ( url, callback ) => {\n\n\tconst script = document.createElement( 'script' );\n\tscript.type = 'text/javascript';\n\tscript.async = false;\n\tscript.defer = false;\n\tscript.src = url;\n\n\tif( typeof callback === 'function' ) {\n\n\t\t// Success callback\n\t\tscript.onload = script.onreadystatechange = event => {\n\t\t\tif( event.type === 'load' || /loaded|complete/.test( script.readyState ) ) {\n\n\t\t\t\t// Kill event listeners\n\t\t\t\tscript.onload = 
script.onreadystatechange = script.onerror = null;\n\n\t\t\t\tcallback();\n\n\t\t\t}\n\t\t};\n\n\t\t// Error callback\n\t\tscript.onerror = err => {\n\n\t\t\t// Kill event listeners\n\t\t\tscript.onload = script.onreadystatechange = script.onerror = null;\n\n\t\t\tcallback( new Error( 'Failed loading script: ' + script.src + '\\n' + err ) );\n\n\t\t};\n\n\t}\n\n\t// Append the script at the end of \n\tconst head = document.querySelector( 'head' );\n\thead.insertBefore( script, head.lastChild );\n\n}","import { loadScript } from '../utils/loader.js'\n\n/**\n * Manages loading and registering of reveal.js plugins.\n */\nexport default class Plugins {\n\n\tconstructor( reveal ) {\n\n\t\tthis.Reveal = reveal;\n\n\t\t// Flags our current state (idle -> loading -> loaded)\n\t\tthis.state = 'idle';\n\n\t\t// An id:instance map of currently registered plugins\n\t\tthis.registeredPlugins = {};\n\n\t\tthis.asyncDependencies = [];\n\n\t}\n\n\t/**\n\t * Loads reveal.js dependencies, registers and\n\t * initializes plugins.\n\t *\n\t * Plugins are direct references to a reveal.js plugin\n\t * object that we register and initialize after any\n\t * synchronous dependencies have loaded.\n\t *\n\t * Dependencies are defined via the 'dependencies' config\n\t * option and will be loaded prior to starting reveal.js.\n\t * Some dependencies may have an 'async' flag, if so they\n\t * will load after reveal.js has been started up.\n\t */\n\tload( plugins, dependencies ) {\n\n\t\tthis.state = 'loading';\n\n\t\tplugins.forEach( this.registerPlugin.bind( this ) );\n\n\t\treturn new Promise( resolve => {\n\n\t\t\tlet scripts = [],\n\t\t\t\tscriptsToLoad = 0;\n\n\t\t\tdependencies.forEach( s => {\n\t\t\t\t// Load if there's no condition or the condition is truthy\n\t\t\t\tif( !s.condition || s.condition() ) {\n\t\t\t\t\tif( s.async ) {\n\t\t\t\t\t\tthis.asyncDependencies.push( s );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tscripts.push( s );\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} );\n\n\t\t\tif( 
scripts.length ) {\n\t\t\t\tscriptsToLoad = scripts.length;\n\n\t\t\t\tconst scriptLoadedCallback = (s) => {\n\t\t\t\t\tif( s && typeof s.callback === 'function' ) s.callback();\n\n\t\t\t\t\tif( --scriptsToLoad === 0 ) {\n\t\t\t\t\t\tthis.initPlugins().then( resolve );\n\t\t\t\t\t}\n\t\t\t\t};\n\n\t\t\t\t// Load synchronous scripts\n\t\t\t\tscripts.forEach( s => {\n\t\t\t\t\tif( typeof s.id === 'string' ) {\n\t\t\t\t\t\tthis.registerPlugin( s );\n\t\t\t\t\t\tscriptLoadedCallback( s );\n\t\t\t\t\t}\n\t\t\t\t\telse if( typeof s.src === 'string' ) {\n\t\t\t\t\t\tloadScript( s.src, () => scriptLoadedCallback(s) );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tconsole.warn( 'Unrecognized plugin format', s );\n\t\t\t\t\t\tscriptLoadedCallback();\n\t\t\t\t\t}\n\t\t\t\t} );\n\t\t\t}\n\t\t\telse {\n\t\t\t\tthis.initPlugins().then( resolve );\n\t\t\t}\n\n\t\t} );\n\n\t}\n\n\t/**\n\t * Initializes our plugins and waits for them to be ready\n\t * before proceeding.\n\t */\n\tinitPlugins() {\n\n\t\treturn new Promise( resolve => {\n\n\t\t\tlet pluginValues = Object.values( this.registeredPlugins );\n\t\t\tlet pluginsToInitialize = pluginValues.length;\n\n\t\t\t// If there are no plugins, skip this step\n\t\t\tif( pluginsToInitialize === 0 ) {\n\t\t\t\tthis.loadAsync().then( resolve );\n\t\t\t}\n\t\t\t// ... 
otherwise initialize plugins\n\t\t\telse {\n\n\t\t\t\tlet initNextPlugin;\n\n\t\t\t\tlet afterPlugInitialized = () => {\n\t\t\t\t\tif( --pluginsToInitialize === 0 ) {\n\t\t\t\t\t\tthis.loadAsync().then( resolve );\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tinitNextPlugin();\n\t\t\t\t\t}\n\t\t\t\t};\n\n\t\t\t\tlet i = 0;\n\n\t\t\t\t// Initialize plugins serially\n\t\t\t\tinitNextPlugin = () => {\n\n\t\t\t\t\tlet plugin = pluginValues[i++];\n\n\t\t\t\t\t// If the plugin has an 'init' method, invoke it\n\t\t\t\t\tif( typeof plugin.init === 'function' ) {\n\t\t\t\t\t\tlet promise = plugin.init( this.Reveal );\n\n\t\t\t\t\t\t// If the plugin returned a Promise, wait for it\n\t\t\t\t\t\tif( promise && typeof promise.then === 'function' ) {\n\t\t\t\t\t\t\tpromise.then( afterPlugInitialized );\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tafterPlugInitialized();\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tafterPlugInitialized();\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tinitNextPlugin();\n\n\t\t\t}\n\n\t\t} )\n\n\t}\n\n\t/**\n\t * Loads all async reveal.js dependencies.\n\t */\n\tloadAsync() {\n\n\t\tthis.state = 'loaded';\n\n\t\tif( this.asyncDependencies.length ) {\n\t\t\tthis.asyncDependencies.forEach( s => {\n\t\t\t\tloadScript( s.src, s.callback );\n\t\t\t} );\n\t\t}\n\n\t\treturn Promise.resolve();\n\n\t}\n\n\t/**\n\t * Registers a new plugin with this reveal.js instance.\n\t *\n\t * reveal.js waits for all registered plugins to initialize\n\t * before considering itself ready, as long as the plugin\n\t * is registered before calling `Reveal.initialize()`.\n\t */\n\tregisterPlugin( plugin ) {\n\n\t\t// Backwards compatibility to make reveal.js ~3.9.0\n\t\t// plugins work with reveal.js 4.0.0\n\t\tif( arguments.length === 2 && typeof arguments[0] === 'string' ) {\n\t\t\tplugin = arguments[1];\n\t\t\tplugin.id = arguments[0];\n\t\t}\n\t\t// Plugin can optionally be a function which we call\n\t\t// to create an instance of the plugin\n\t\telse if( typeof 
plugin === 'function' ) {\n\t\t\tplugin = plugin();\n\t\t}\n\n\t\tlet id = plugin.id;\n\n\t\tif( typeof id !== 'string' ) {\n\t\t\tconsole.warn( 'Unrecognized plugin format; can\\'t find plugin.id', plugin );\n\t\t}\n\t\telse if( this.registeredPlugins[id] === undefined ) {\n\t\t\tthis.registeredPlugins[id] = plugin;\n\n\t\t\t// If a plugin is registered after reveal.js is loaded,\n\t\t\t// initialize it right away\n\t\t\tif( this.state === 'loaded' && typeof plugin.init === 'function' ) {\n\t\t\t\tplugin.init( this.Reveal );\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tconsole.warn( 'reveal.js: \"'+ id +'\" plugin has already been registered' );\n\t\t}\n\n\t}\n\n\t/**\n\t * Checks if a specific plugin has been registered.\n\t *\n\t * @param {String} id Unique plugin identifier\n\t */\n\thasPlugin( id ) {\n\n\t\treturn !!this.registeredPlugins[id];\n\n\t}\n\n\t/**\n\t * Returns the specific plugin instance, if a plugin\n\t * with the given ID has been registered.\n\t *\n\t * @param {String} id Unique plugin identifier\n\t */\n\tgetPlugin( id ) {\n\n\t\treturn this.registeredPlugins[id];\n\n\t}\n\n\tgetRegisteredPlugins() {\n\n\t\treturn this.registeredPlugins;\n\n\t}\n\n\tdestroy() {\n\n\t\tObject.values( this.registeredPlugins ).forEach( plugin => {\n\t\t\tif( typeof plugin.destroy === 'function' ) {\n\t\t\t\tplugin.destroy();\n\t\t\t}\n\t\t} );\n\n\t\tthis.registeredPlugins = {};\n\t\tthis.asyncDependencies = [];\n\n\t}\n\n}\n","import { SLIDES_SELECTOR } from '../utils/constants.js'\nimport { queryAll, createStyleSheet } from '../utils/util.js'\n\n/**\n * Setups up our presentation for printing/exporting to PDF.\n */\nexport default class Print {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t}\n\n\t/**\n\t * Configures the presentation for printing to a static\n\t * PDF.\n\t */\n\tasync setupPDF() {\n\n\t\tconst config = this.Reveal.getConfig();\n\t\tconst slides = queryAll( this.Reveal.getRevealElement(), SLIDES_SELECTOR )\n\n\t\t// Compute slide numbers 
now, before we start duplicating slides\n\t\tconst injectPageNumbers = config.slideNumber && /all|print/i.test( config.showSlideNumber );\n\n\t\tconst slideSize = this.Reveal.getComputedSlideSize( window.innerWidth, window.innerHeight );\n\n\t\t// Dimensions of the PDF pages\n\t\tconst pageWidth = Math.floor( slideSize.width * ( 1 + config.margin ) ),\n\t\t\tpageHeight = Math.floor( slideSize.height * ( 1 + config.margin ) );\n\n\t\t// Dimensions of slides within the pages\n\t\tconst slideWidth = slideSize.width,\n\t\t\tslideHeight = slideSize.height;\n\n\t\tawait new Promise( requestAnimationFrame );\n\n\t\t// Let the browser know what page size we want to print\n\t\tcreateStyleSheet( '@page{size:'+ pageWidth +'px '+ pageHeight +'px; margin: 0px;}' );\n\n\t\t// Limit the size of certain elements to the dimensions of the slide\n\t\tcreateStyleSheet( '.reveal section>img, .reveal section>video, .reveal section>iframe{max-width: '+ slideWidth +'px; max-height:'+ slideHeight +'px}' );\n\n\t\tdocument.documentElement.classList.add( 'print-pdf' );\n\t\tdocument.body.style.width = pageWidth + 'px';\n\t\tdocument.body.style.height = pageHeight + 'px';\n\n\t\tconst viewportElement = document.querySelector( '.reveal-viewport' );\n\t\tlet presentationBackground;\n\t\tif( viewportElement ) {\n\t\t\tconst viewportStyles = window.getComputedStyle( viewportElement );\n\t\t\tif( viewportStyles && viewportStyles.background ) {\n\t\t\t\tpresentationBackground = viewportStyles.background;\n\t\t\t}\n\t\t}\n\n\t\t// Make sure stretch elements fit on slide\n\t\tawait new Promise( requestAnimationFrame );\n\t\tthis.Reveal.layoutSlideContents( slideWidth, slideHeight );\n\n\t\t// Batch scrollHeight access to prevent layout thrashing\n\t\tawait new Promise( requestAnimationFrame );\n\n\t\tconst slideScrollHeights = slides.map( slide => slide.scrollHeight );\n\n\t\tconst pages = [];\n\t\tconst pageContainer = slides[0].parentNode;\n\t\tlet slideNumber = 1;\n\n\t\t// Slide and slide 
background layout\n\t\tslides.forEach( function( slide, index ) {\n\n\t\t\t// Vertical stacks are not centred since their section\n\t\t\t// children will be\n\t\t\tif( slide.classList.contains( 'stack' ) === false ) {\n\t\t\t\t// Center the slide inside of the page, giving the slide some margin\n\t\t\t\tlet left = ( pageWidth - slideWidth ) / 2;\n\t\t\t\tlet top = ( pageHeight - slideHeight ) / 2;\n\n\t\t\t\tconst contentHeight = slideScrollHeights[ index ];\n\t\t\t\tlet numberOfPages = Math.max( Math.ceil( contentHeight / pageHeight ), 1 );\n\n\t\t\t\t// Adhere to configured pages per slide limit\n\t\t\t\tnumberOfPages = Math.min( numberOfPages, config.pdfMaxPagesPerSlide );\n\n\t\t\t\t// Center slides vertically\n\t\t\t\tif( numberOfPages === 1 && config.center || slide.classList.contains( 'center' ) ) {\n\t\t\t\t\ttop = Math.max( ( pageHeight - contentHeight ) / 2, 0 );\n\t\t\t\t}\n\n\t\t\t\t// Wrap the slide in a page element and hide its overflow\n\t\t\t\t// so that no page ever flows onto another\n\t\t\t\tconst page = document.createElement( 'div' );\n\t\t\t\tpages.push( page );\n\n\t\t\t\tpage.className = 'pdf-page';\n\t\t\t\tpage.style.height = ( ( pageHeight + config.pdfPageHeightOffset ) * numberOfPages ) + 'px';\n\n\t\t\t\t// Copy the presentation-wide background to each individual\n\t\t\t\t// page when printing\n\t\t\t\tif( presentationBackground ) {\n\t\t\t\t\tpage.style.background = presentationBackground;\n\t\t\t\t}\n\n\t\t\t\tpage.appendChild( slide );\n\n\t\t\t\t// Position the slide inside of the page\n\t\t\t\tslide.style.left = left + 'px';\n\t\t\t\tslide.style.top = top + 'px';\n\t\t\t\tslide.style.width = slideWidth + 'px';\n\n\t\t\t\tthis.Reveal.slideContent.layout( slide );\n\n\t\t\t\tif( slide.slideBackgroundElement ) {\n\t\t\t\t\tpage.insertBefore( slide.slideBackgroundElement, slide );\n\t\t\t\t}\n\n\t\t\t\t// Inject notes if `showNotes` is enabled\n\t\t\t\tif( config.showNotes ) {\n\n\t\t\t\t\t// Are there notes for this 
slide?\n\t\t\t\t\tconst notes = this.Reveal.getSlideNotes( slide );\n\t\t\t\t\tif( notes ) {\n\n\t\t\t\t\t\tconst notesSpacing = 8;\n\t\t\t\t\t\tconst notesLayout = typeof config.showNotes === 'string' ? config.showNotes : 'inline';\n\t\t\t\t\t\tconst notesElement = document.createElement( 'div' );\n\t\t\t\t\t\tnotesElement.classList.add( 'speaker-notes' );\n\t\t\t\t\t\tnotesElement.classList.add( 'speaker-notes-pdf' );\n\t\t\t\t\t\tnotesElement.setAttribute( 'data-layout', notesLayout );\n\t\t\t\t\t\tnotesElement.innerHTML = notes;\n\n\t\t\t\t\t\tif( notesLayout === 'separate-page' ) {\n\t\t\t\t\t\t\tpages.push( notesElement );\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tnotesElement.style.left = notesSpacing + 'px';\n\t\t\t\t\t\t\tnotesElement.style.bottom = notesSpacing + 'px';\n\t\t\t\t\t\t\tnotesElement.style.width = ( pageWidth - notesSpacing*2 ) + 'px';\n\t\t\t\t\t\t\tpage.appendChild( notesElement );\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\t// Inject page numbers if `slideNumbers` are enabled\n\t\t\t\tif( injectPageNumbers ) {\n\t\t\t\t\tconst numberElement = document.createElement( 'div' );\n\t\t\t\t\tnumberElement.classList.add( 'slide-number' );\n\t\t\t\t\tnumberElement.classList.add( 'slide-number-pdf' );\n\t\t\t\t\tnumberElement.innerHTML = slideNumber++;\n\t\t\t\t\tpage.appendChild( numberElement );\n\t\t\t\t}\n\n\t\t\t\t// Copy page and show fragments one after another\n\t\t\t\tif( config.pdfSeparateFragments ) {\n\n\t\t\t\t\t// Each fragment 'group' is an array containing one or more\n\t\t\t\t\t// fragments. 
Multiple fragments that appear at the same time\n\t\t\t\t\t// are part of the same group.\n\t\t\t\t\tconst fragmentGroups = this.Reveal.fragments.sort( page.querySelectorAll( '.fragment' ), true );\n\n\t\t\t\t\tlet previousFragmentStep;\n\n\t\t\t\t\tfragmentGroups.forEach( function( fragments, index ) {\n\n\t\t\t\t\t\t// Remove 'current-fragment' from the previous group\n\t\t\t\t\t\tif( previousFragmentStep ) {\n\t\t\t\t\t\t\tpreviousFragmentStep.forEach( function( fragment ) {\n\t\t\t\t\t\t\t\tfragment.classList.remove( 'current-fragment' );\n\t\t\t\t\t\t\t} );\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Show the fragments for the current index\n\t\t\t\t\t\tfragments.forEach( function( fragment ) {\n\t\t\t\t\t\t\tfragment.classList.add( 'visible', 'current-fragment' );\n\t\t\t\t\t\t}, this );\n\n\t\t\t\t\t\t// Create a separate page for the current fragment state\n\t\t\t\t\t\tconst clonedPage = page.cloneNode( true );\n\n\t\t\t\t\t\t// Inject unique page numbers for fragments\n\t\t\t\t\t\tif( injectPageNumbers ) {\n\t\t\t\t\t\t\tconst numberElement = clonedPage.querySelector( '.slide-number-pdf' );\n\t\t\t\t\t\t\tconst fragmentNumber = index + 1;\n\t\t\t\t\t\t\tnumberElement.innerHTML += '.' 
+ fragmentNumber;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tpages.push( clonedPage );\n\n\t\t\t\t\t\tpreviousFragmentStep = fragments;\n\n\t\t\t\t\t}, this );\n\n\t\t\t\t\t// Reset the first/original page so that all fragments are hidden\n\t\t\t\t\tfragmentGroups.forEach( function( fragments ) {\n\t\t\t\t\t\tfragments.forEach( function( fragment ) {\n\t\t\t\t\t\t\tfragment.classList.remove( 'visible', 'current-fragment' );\n\t\t\t\t\t\t} );\n\t\t\t\t\t} );\n\n\t\t\t\t}\n\t\t\t\t// Show all fragments\n\t\t\t\telse {\n\t\t\t\t\tqueryAll( page, '.fragment:not(.fade-out)' ).forEach( function( fragment ) {\n\t\t\t\t\t\tfragment.classList.add( 'visible' );\n\t\t\t\t\t} );\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}, this );\n\n\t\tawait new Promise( requestAnimationFrame );\n\n\t\tpages.forEach( page => pageContainer.appendChild( page ) );\n\n\t\t// Re-run JS-based content layout after the slide is added to page DOM\n\t\tthis.Reveal.slideContent.layout( this.Reveal.getSlidesElement() );\n\n\t\t// Notify subscribers that the PDF layout is good to go\n\t\tthis.Reveal.dispatchEvent({ type: 'pdf-ready' });\n\n\t}\n\n\t/**\n\t * Checks if this instance is being used to print a PDF.\n\t */\n\tisPrintingPDF() {\n\n\t\treturn ( /print-pdf/gi ).test( window.location.search );\n\n\t}\n\n}\n","import { isAndroid } from '../utils/device.js'\nimport { matches } from '../utils/util.js'\n\nconst SWIPE_THRESHOLD = 40;\n\n/**\n * Controls all touch interactions and navigations for\n * a presentation.\n */\nexport default class Touch {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\t// Holds information about the currently ongoing touch interaction\n\t\tthis.touchStartX = 0;\n\t\tthis.touchStartY = 0;\n\t\tthis.touchStartCount = 0;\n\t\tthis.touchCaptured = false;\n\n\t\tthis.onPointerDown = this.onPointerDown.bind( this );\n\t\tthis.onPointerMove = this.onPointerMove.bind( this );\n\t\tthis.onPointerUp = this.onPointerUp.bind( this );\n\t\tthis.onTouchStart = this.onTouchStart.bind( this 
);\n\t\tthis.onTouchMove = this.onTouchMove.bind( this );\n\t\tthis.onTouchEnd = this.onTouchEnd.bind( this );\n\n\t}\n\n\t/**\n\t *\n\t */\n\tbind() {\n\n\t\tlet revealElement = this.Reveal.getRevealElement();\n\n\t\tif( 'onpointerdown' in window ) {\n\t\t\t// Use W3C pointer events\n\t\t\trevealElement.addEventListener( 'pointerdown', this.onPointerDown, false );\n\t\t\trevealElement.addEventListener( 'pointermove', this.onPointerMove, false );\n\t\t\trevealElement.addEventListener( 'pointerup', this.onPointerUp, false );\n\t\t}\n\t\telse if( window.navigator.msPointerEnabled ) {\n\t\t\t// IE 10 uses prefixed version of pointer events\n\t\t\trevealElement.addEventListener( 'MSPointerDown', this.onPointerDown, false );\n\t\t\trevealElement.addEventListener( 'MSPointerMove', this.onPointerMove, false );\n\t\t\trevealElement.addEventListener( 'MSPointerUp', this.onPointerUp, false );\n\t\t}\n\t\telse {\n\t\t\t// Fall back to touch events\n\t\t\trevealElement.addEventListener( 'touchstart', this.onTouchStart, false );\n\t\t\trevealElement.addEventListener( 'touchmove', this.onTouchMove, false );\n\t\t\trevealElement.addEventListener( 'touchend', this.onTouchEnd, false );\n\t\t}\n\n\t}\n\n\t/**\n\t *\n\t */\n\tunbind() {\n\n\t\tlet revealElement = this.Reveal.getRevealElement();\n\n\t\trevealElement.removeEventListener( 'pointerdown', this.onPointerDown, false );\n\t\trevealElement.removeEventListener( 'pointermove', this.onPointerMove, false );\n\t\trevealElement.removeEventListener( 'pointerup', this.onPointerUp, false );\n\n\t\trevealElement.removeEventListener( 'MSPointerDown', this.onPointerDown, false );\n\t\trevealElement.removeEventListener( 'MSPointerMove', this.onPointerMove, false );\n\t\trevealElement.removeEventListener( 'MSPointerUp', this.onPointerUp, false );\n\n\t\trevealElement.removeEventListener( 'touchstart', this.onTouchStart, false );\n\t\trevealElement.removeEventListener( 'touchmove', this.onTouchMove, false 
);\n\t\trevealElement.removeEventListener( 'touchend', this.onTouchEnd, false );\n\n\t}\n\n\t/**\n\t * Checks if the target element prevents the triggering of\n\t * swipe navigation.\n\t */\n\tisSwipePrevented( target ) {\n\n\t\t// Prevent accidental swipes when scrubbing timelines\n\t\tif( matches( target, 'video, audio' ) ) return true;\n\n\t\twhile( target && typeof target.hasAttribute === 'function' ) {\n\t\t\tif( target.hasAttribute( 'data-prevent-swipe' ) ) return true;\n\t\t\ttarget = target.parentNode;\n\t\t}\n\n\t\treturn false;\n\n\t}\n\n\t/**\n\t * Handler for the 'touchstart' event, enables support for\n\t * swipe and pinch gestures.\n\t *\n\t * @param {object} event\n\t */\n\tonTouchStart( event ) {\n\n\t\tif( this.isSwipePrevented( event.target ) ) return true;\n\n\t\tthis.touchStartX = event.touches[0].clientX;\n\t\tthis.touchStartY = event.touches[0].clientY;\n\t\tthis.touchStartCount = event.touches.length;\n\n\t}\n\n\t/**\n\t * Handler for the 'touchmove' event.\n\t *\n\t * @param {object} event\n\t */\n\tonTouchMove( event ) {\n\n\t\tif( this.isSwipePrevented( event.target ) ) return true;\n\n\t\tlet config = this.Reveal.getConfig();\n\n\t\t// Each touch should only trigger one action\n\t\tif( !this.touchCaptured ) {\n\t\t\tthis.Reveal.onUserInput( event );\n\n\t\t\tlet currentX = event.touches[0].clientX;\n\t\t\tlet currentY = event.touches[0].clientY;\n\n\t\t\t// There was only one touch point, look for a swipe\n\t\t\tif( event.touches.length === 1 && this.touchStartCount !== 2 ) {\n\n\t\t\t\tlet availableRoutes = this.Reveal.availableRoutes({ includeFragments: true });\n\n\t\t\t\tlet deltaX = currentX - this.touchStartX,\n\t\t\t\t\tdeltaY = currentY - this.touchStartY;\n\n\t\t\t\tif( deltaX > SWIPE_THRESHOLD && Math.abs( deltaX ) > Math.abs( deltaY ) ) {\n\t\t\t\t\tthis.touchCaptured = true;\n\t\t\t\t\tif( config.navigationMode === 'linear' ) {\n\t\t\t\t\t\tif( config.rtl ) {\n\t\t\t\t\t\t\tthis.Reveal.next();\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse 
{\n\t\t\t\t\t\t\tthis.Reveal.prev();\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthis.Reveal.left();\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if( deltaX < -SWIPE_THRESHOLD && Math.abs( deltaX ) > Math.abs( deltaY ) ) {\n\t\t\t\t\tthis.touchCaptured = true;\n\t\t\t\t\tif( config.navigationMode === 'linear' ) {\n\t\t\t\t\t\tif( config.rtl ) {\n\t\t\t\t\t\t\tthis.Reveal.prev();\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\tthis.Reveal.next();\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthis.Reveal.right();\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if( deltaY > SWIPE_THRESHOLD && availableRoutes.up ) {\n\t\t\t\t\tthis.touchCaptured = true;\n\t\t\t\t\tif( config.navigationMode === 'linear' ) {\n\t\t\t\t\t\tthis.Reveal.prev();\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthis.Reveal.up();\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if( deltaY < -SWIPE_THRESHOLD && availableRoutes.down ) {\n\t\t\t\t\tthis.touchCaptured = true;\n\t\t\t\t\tif( config.navigationMode === 'linear' ) {\n\t\t\t\t\t\tthis.Reveal.next();\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tthis.Reveal.down();\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// If we're embedded, only block touch events if they have\n\t\t\t\t// triggered an action\n\t\t\t\tif( config.embedded ) {\n\t\t\t\t\tif( this.touchCaptured || this.Reveal.isVerticalSlide() ) {\n\t\t\t\t\t\tevent.preventDefault();\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Not embedded? 
Block them all to avoid needless tossing\n\t\t\t\t// around of the viewport in iOS\n\t\t\t\telse {\n\t\t\t\t\tevent.preventDefault();\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\t// There's a bug with swiping on some Android devices unless\n\t\t// the default action is always prevented\n\t\telse if( isAndroid ) {\n\t\t\tevent.preventDefault();\n\t\t}\n\n\t}\n\n\t/**\n\t * Handler for the 'touchend' event.\n\t *\n\t * @param {object} event\n\t */\n\tonTouchEnd( event ) {\n\n\t\tthis.touchCaptured = false;\n\n\t}\n\n\t/**\n\t * Convert pointer down to touch start.\n\t *\n\t * @param {object} event\n\t */\n\tonPointerDown( event ) {\n\n\t\tif( event.pointerType === event.MSPOINTER_TYPE_TOUCH || event.pointerType === \"touch\" ) {\n\t\t\tevent.touches = [{ clientX: event.clientX, clientY: event.clientY }];\n\t\t\tthis.onTouchStart( event );\n\t\t}\n\n\t}\n\n\t/**\n\t * Convert pointer move to touch move.\n\t *\n\t * @param {object} event\n\t */\n\tonPointerMove( event ) {\n\n\t\tif( event.pointerType === event.MSPOINTER_TYPE_TOUCH || event.pointerType === \"touch\" ) {\n\t\t\tevent.touches = [{ clientX: event.clientX, clientY: event.clientY }];\n\t\t\tthis.onTouchMove( event );\n\t\t}\n\n\t}\n\n\t/**\n\t * Convert pointer up to touch end.\n\t *\n\t * @param {object} event\n\t */\n\tonPointerUp( event ) {\n\n\t\tif( event.pointerType === event.MSPOINTER_TYPE_TOUCH || event.pointerType === \"touch\" ) {\n\t\t\tevent.touches = [{ clientX: event.clientX, clientY: event.clientY }];\n\t\t\tthis.onTouchEnd( event );\n\t\t}\n\n\t}\n\n}","import { closest } from '../utils/util.js'\n\n/**\n * Manages focus when a presentation is embedded. 
This\n * helps us only capture keyboard from the presentation\n * a user is currently interacting with in a page where\n * multiple presentations are embedded.\n */\n\nconst STATE_FOCUS = 'focus';\nconst STATE_BLUR = 'blur';\n\nexport default class Focus {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t\tthis.onRevealPointerDown = this.onRevealPointerDown.bind( this );\n\t\tthis.onDocumentPointerDown = this.onDocumentPointerDown.bind( this );\n\n\t}\n\n\t/**\n\t * Called when the reveal.js config is updated.\n\t */\n\tconfigure( config, oldConfig ) {\n\n\t\tif( config.embedded ) {\n\t\t\tthis.blur();\n\t\t}\n\t\telse {\n\t\t\tthis.focus();\n\t\t\tthis.unbind();\n\t\t}\n\n\t}\n\n\tbind() {\n\n\t\tif( this.Reveal.getConfig().embedded ) {\n\t\t\tthis.Reveal.getRevealElement().addEventListener( 'pointerdown', this.onRevealPointerDown, false );\n\t\t}\n\n\t}\n\n\tunbind() {\n\n\t\tthis.Reveal.getRevealElement().removeEventListener( 'pointerdown', this.onRevealPointerDown, false );\n\t\tdocument.removeEventListener( 'pointerdown', this.onDocumentPointerDown, false );\n\n\t}\n\n\tfocus() {\n\n\t\tif( this.state !== STATE_FOCUS ) {\n\t\t\tthis.Reveal.getRevealElement().classList.add( 'focused' );\n\t\t\tdocument.addEventListener( 'pointerdown', this.onDocumentPointerDown, false );\n\t\t}\n\n\t\tthis.state = STATE_FOCUS;\n\n\t}\n\n\tblur() {\n\n\t\tif( this.state !== STATE_BLUR ) {\n\t\t\tthis.Reveal.getRevealElement().classList.remove( 'focused' );\n\t\t\tdocument.removeEventListener( 'pointerdown', this.onDocumentPointerDown, false );\n\t\t}\n\n\t\tthis.state = STATE_BLUR;\n\n\t}\n\n\tisFocused() {\n\n\t\treturn this.state === STATE_FOCUS;\n\n\t}\n\n\tdestroy() {\n\n\t\tthis.Reveal.getRevealElement().classList.remove( 'focused' );\n\n\t}\n\n\tonRevealPointerDown( event ) {\n\n\t\tthis.focus();\n\n\t}\n\n\tonDocumentPointerDown( event ) {\n\n\t\tlet revealElement = closest( event.target, '.reveal' );\n\t\tif( !revealElement || revealElement !== 
this.Reveal.getRevealElement() ) {\n\t\t\tthis.blur();\n\t\t}\n\n\t}\n\n}","/**\n * Handles the showing of speaker notes\n */\nexport default class Notes {\n\n\tconstructor( Reveal ) {\n\n\t\tthis.Reveal = Reveal;\n\n\t}\n\n\trender() {\n\n\t\tthis.element = document.createElement( 'div' );\n\t\tthis.element.className = 'speaker-notes';\n\t\tthis.element.setAttribute( 'data-prevent-swipe', '' );\n\t\tthis.element.setAttribute( 'tabindex', '0' );\n\t\tthis.Reveal.getRevealElement().appendChild( this.element );\n\n\t}\n\n\t/**\n\t * Called when the reveal.js config is updated.\n\t */\n\tconfigure( config, oldConfig ) {\n\n\t\tif( config.showNotes ) {\n\t\t\tthis.element.setAttribute( 'data-layout', typeof config.showNotes === 'string' ? config.showNotes : 'inline' );\n\t\t}\n\n\t}\n\n\t/**\n\t * Pick up notes from the current slide and display them\n\t * to the viewer.\n\t *\n\t * @see {@link config.showNotes}\n\t */\n\tupdate() {\n\n\t\tif( this.Reveal.getConfig().showNotes && this.element && this.Reveal.getCurrentSlide() && !this.Reveal.print.isPrintingPDF() ) {\n\n\t\t\tthis.element.innerHTML = this.getSlideNotes() || 'No notes on this slide.';\n\n\t\t}\n\n\t}\n\n\t/**\n\t * Updates the visibility of the speaker notes sidebar that\n\t * is used to share annotated slides. 
The notes sidebar is\n\t * only visible if showNotes is true and there are notes on\n\t * one or more slides in the deck.\n\t */\n\tupdateVisibility() {\n\n\t\tif( this.Reveal.getConfig().showNotes && this.hasNotes() && !this.Reveal.print.isPrintingPDF() ) {\n\t\t\tthis.Reveal.getRevealElement().classList.add( 'show-notes' );\n\t\t}\n\t\telse {\n\t\t\tthis.Reveal.getRevealElement().classList.remove( 'show-notes' );\n\t\t}\n\n\t}\n\n\t/**\n\t * Checks if there are speaker notes for ANY slide in the\n\t * presentation.\n\t */\n\thasNotes() {\n\n\t\treturn this.Reveal.getSlidesElement().querySelectorAll( '[data-notes], aside.notes' ).length > 0;\n\n\t}\n\n\t/**\n\t * Checks if this presentation is running inside of the\n\t * speaker notes window.\n\t *\n\t * @return {boolean}\n\t */\n\tisSpeakerNotesWindow() {\n\n\t\treturn !!window.location.search.match( /receiver/gi );\n\n\t}\n\n\t/**\n\t * Retrieves the speaker notes from a slide. Notes can be\n\t * defined in two ways:\n\t * 1. As a data-notes attribute on the slide
\n\t * 2. With