Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions ansible/check_indexing_service.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Issue: the indexing container's IP address must be set up inside the
# pg_hba.conf file for postgres to accept the connection from it.
# This playbook reports whether the one-shot indexing container (named by
# the searchengine_index group variable) still exists and, if so, its state.
- name: Check indexing
  hosts: "{{ idr_environment | default('idr') }}-searchengine-hosts"

  tasks:
    - name: Get info on container
      become: true
      docker_container_info:
        name: "{{ searchengine_index }}"
      register: result

    - name: Does indexing container exist?
      debug:
        msg: "The indexing container {{ 'exists' if result.exists else 'does not exist' }}"

    - name: Did indexing finish?
      debug:
        msg: "The indexing container status is {{ result.container['State']['Status'] }}"
      # Only inspect the state when the container actually exists.
      when: result.exists

17 changes: 17 additions & 0 deletions ansible/group_vars/searchengine-hosts.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Group variables for the IDR search engine stack (searchengine, searchengine
# client and Elasticsearch) deployed by ansible/idr-searchengine.yml.

# Top-level host directory under which all app folders are created.
apps_folder: /data
# database_server_url is normally derived at runtime from the database host's
# facts (see the "Get database host" task in idr-searchengine.yml); uncomment
# to override.
#database_server_url: "{{ omero_db_host_ansible }}"
database_port: 5432
database_name: idr
database_username: omeroreadonly
database_user_password: "{{ idr_secret_postgresql_password_ro | default('omero') }}"
# Host folder backing the search engine's cached data.
searchenginecache_folder: /data/searchengine/searchengine/cacheddata/
search_engineelasticsearch_docker_image: docker.elastic.co/elasticsearch/elasticsearch:7.16.2
searchengine_docker_image: openmicroscopy/omero-searchengine:latest
searchengineclient_docker_image: openmicroscopy/omero-searchengineclient:latest
#ansible_python_interpreter: path/to/bin/python
# Name of the one-shot indexing container; checked by check_indexing_service.yml.
searchengine_index: searchengine_index
# Number of rows fetched per batch when building the cache.
cache_rows: 100000
# NOTE(review): the following two secret keys are hard-coded; they should be
# moved to the secrets store (e.g. Ansible Vault) — TODO confirm.
searchengine_secret_key: "fagfdssf3fgdnvhg56ghhgfhgfgh45f"
searchengineclient_secret_key: "gfdgfdggregb3tyttnmnymytmasfd"
searchengineurlprefix: "searchengineapi"
299 changes: 299 additions & 0 deletions ansible/idr-searchengine.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,299 @@
# Search engine + Search Engine Client + Elasticsearch

# Facts-only play: touches the database hosts so that their facts (network
# interface addresses) are available via hostvars in the next play.
- hosts: "{{ idr_environment | default('idr') }}-database-hosts"

- name: Deploying search engine
  hosts: "{{ idr_environment | default('idr') }}-searchengine-hosts"

  tasks:
    # Resolve the database host's IP from the first member of the
    # <env>-database-hosts group, on the configured network interface.
    - name: Get database host
      set_fact:
        database_server_url: "{{ hostvars[groups[idr_environment | default('idr') + '-database-hosts'][0]]['ansible_' + (idr_net_iface | default('eth0'))]['ipv4']['address'] }}"

    - name: Create app top level directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine"
        recurse: true
        state: directory
        owner: root
        group: root

    - name: Create searchengine folder directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine/searchengine"
        recurse: true
        state: directory
        owner: root
        group: root

    - name: Create searchengine logs directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine/searchengine/logs"
        state: directory

    - name: Create searchengine cached directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine/searchengine/cacheddata"
        state: directory

    - name: Create client directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine/client"
        recurse: true
        state: directory
        owner: root
        group: root

    - name: Create client data directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine/client/app_data"
        recurse: true
        state: directory
        owner: root
        group: root

    - name: Create client logs directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine/client/logs"
        state: directory

    - name: Create elasticsearch directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine/elasticsearch"
        state: directory
        # User id in elasticsearch Docker image
        owner: 1000
        group: root

    - name: Create elasticsearch logs directory
      become: true
      file:
        path: "{{ apps_folder }}/searchengine/elasticsearch/logs"
        state: directory
        # User id in elasticsearch Docker image
        owner: 1000
        group: root

    - name: Create elasticsearch data directory
      become: true
      file:
        # Fixed: was "elasticsearch{{ apps_folder }}", which only resolved to
        # .../elasticsearch/data when apps_folder happened to be "/data".
        path: "{{ apps_folder }}/searchengine/elasticsearch/data"
        state: directory
        # User id in elasticsearch Docker image
        owner: 1000
        group: root

    - name: Create docker network
      become: true
      docker_network:
        name: searchengine-net
        ipam_config:
          # Fixed: each ipam_config entry is a mapping ("subnet: ..."),
          # not a "subnet=..." string.
          - subnet: 10.11.0.0/16

    - name: Run docker elasticsearch
      become: true
      docker_container:
        image: "{{ search_engineelasticsearch_docker_image }}"
        name: searchengineelasticsearch
        cleanup: true
        env:
          # Duplicate "discovery.type" key removed (it was listed twice).
          discovery.type: single-node
          path.data: /var/lib/elasticsearch
          path.logs: /var/log/elasticsearch
          network.host: "0.0.0.0"
          cluster.name: docker-cluster
          http.host: "0.0.0.0"
          #http.port: 9200
          ES_JAVA_OPTS: "-Xmx4096m"
        networks:
          - name: searchengine-net
        published_ports:
          - "9201:9200"
          - "9301:9300"
        state: started
        restart_policy: always
        volumes:
          - "{{ apps_folder }}/searchengine/elasticsearch/data:/var/lib/elasticsearch"
          - "{{ apps_folder }}/searchengine/elasticsearch/logs:/var/log/elasticsearch"

    # One-shot container: writes the Elasticsearch connection details into
    # the mounted /etc/searchengine/ configuration folder.
    - name: configure elasticsearch for docker searchengine
      become: true
      docker_container:
        image: "{{ searchengine_docker_image }}"
        name: searchengine_elasticsearch
        cleanup: true
        command: "set_elasticsearch_configuration -e searchengineelasticsearch"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/searchengine/:/etc/searchengine/"

    # One-shot container: writes the postgres connection details.
    - name: configure database for docker searchengine
      become: true
      docker_container:
        image: "{{ searchengine_docker_image }}"
        name: searchengine_database
        cleanup: true
        command: "set_database_configuration -u {{ database_server_url }} -d {{ database_name }} -s {{ database_port }} -n {{ database_username }} -p {{ database_user_password }}"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/searchengine/:/etc/searchengine/"

    - name: configure cache folder for docker searchengine
      become: true
      docker_container:
        image: "{{ searchengine_docker_image }}"
        name: searchengine_cache
        cleanup: true
        # Fixed: "cachedata" -> "cacheddata" to match the host directory
        # created above and the searchenginecache_folder group variable.
        command: "set_cache_folder -c /etc/searchengine/cacheddata"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/searchengine/:/etc/searchengine/"

    - name: configure number of cache rows for docker searchengine
      become: true
      docker_container:
        image: "{{ searchengine_docker_image }}"
        # NOTE(review): reuses the "searchengine_cache" container name of the
        # previous one-shot task; intentional sequential reuse — confirm.
        name: searchengine_cache
        cleanup: true
        command: "set_cache_rows_number -n {{ cache_rows }}"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/searchengine/:/etc/searchengine/"

    - name: configure secret key for docker searchengine
      become: true
      docker_container:
        image: "{{ searchengine_docker_image }}"
        name: searchengine_cache
        cleanup: true
        command: "set_searchengine_secret_key -s {{ searchengine_secret_key }}"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/searchengine/:/etc/searchengine/"

    - name: create elasticsearch all indices for docker searchengine
      become: true
      docker_container:
        image: "{{ searchengine_docker_image }}"
        name: searchengine_create_index
        cleanup: true
        command: create_index
        networks:
          - name: searchengine-net
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/searchengine/:/etc/searchengine/"

    - name: configure search engine url for search client
      become: true
      docker_container:
        image: "{{ searchengineclient_docker_image }}"
        name: searchengineclient_search_uri
        cleanup: true
        # Fixed: the trailing '# http://127.0.0.1:5556/"' fragment made this
        # line malformed YAML; the old URL is kept here as a comment instead.
        command: "set_searchengine_url -u http://searchengine:5577/"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/client:/etc/searchengineclient/"

    - name: configure secret key for search client
      become: true
      docker_container:
        image: "{{ searchengineclient_docker_image }}"
        name: searchengineclient_search_uri
        cleanup: true
        command: "set_client_secret_key -s {{ searchengineclient_secret_key }}"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/client:/etc/searchengineclient/"

    - name: configure app data folder for search client
      become: true
      docker_container:
        image: "{{ searchengineclient_docker_image }}"
        name: searchengineclient_search_uri
        cleanup: true
        command: "set_app_data_folder -a /etc/searchengineclient/app_data"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/client:/etc/searchengineclient/"

    - name: Run docker searchengine
      become: true
      docker_container:
        image: "{{ searchengine_docker_image }}"
        name: searchengine
        cleanup: true
        command: "run_app {{ searchengineurlprefix }}"
        networks:
          - name: searchengine-net
        published_ports:
          - "5577:5577"
        state: started
        restart_policy: always
        volumes:
          - "{{ apps_folder }}/searchengine/searchengine/:/etc/searchengine/"
          # Fixed: "cachedata" -> "cacheddata" (the directory created above).
          - "{{ apps_folder }}/searchengine/searchengine/cacheddata:/etc/searchengine/cacheddata"

    - name: Run docker searchengineclient
      become: true
      docker_container:
        image: "{{ searchengineclient_docker_image }}"
        name: searchengineclient
        cleanup: true
        networks:
          - name: searchengine-net
        published_ports:
          - "5567:5567"
        state: started
        restart_policy: always
        volumes:
          - "{{ apps_folder }}/searchengine/client:/etc/searchengineclient/"
          - "{{ apps_folder }}/searchengine/client/app_data:/etc/searchengineclient/app_data"
23 changes: 23 additions & 0 deletions ansible/run_searchengine_index_service.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Issue: the static IP address below (10.11.0.11) must be added to the
# pg_hba.conf file for postgres to accept the connection from the indexing
# container.
- name: Deploying search engine cache and indexing
  hosts: "{{ idr_environment | default('idr') }}-searchengine-hosts"

  tasks:
    - name: Get data from postgres database and insert them to Elasticsearch index using docker searchengine
      become: true
      docker_container:
        image: "{{ searchengine_docker_image }}"
        name: searchengine_index
        cleanup: true
        command: "get_index_data_from_database"
        networks:
          - name: searchengine-net
            # Static address so it can be allow-listed in pg_hba.conf.
            ipv4_address: 10.11.0.11
        published_ports:
          - "5571:5577"
        state: started
        volumes:
          - "{{ apps_folder }}/searchengine/searchengine/:/etc/searchengine/"

20 changes: 20 additions & 0 deletions docs/searchengine_deployemnt.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
Searchengine installation and configuration using Ansible:
==========================================================

There is an Ansible playbook (idr-searchengine.yml) that has been written to deploy the apps:
* It will configure and run the searchengine, Elasticsearch and the searchengine client
* It will configure and create the required folders
* It will configure the three apps and run them
* There is a variables file (group_vars/searchengine-hosts.yml) that the user needs to edit before running the playbook
* The variable names are self-explanatory
* To check that the apps have been installed and run, the user can use wget or curl to call:
* for searchengine, http://127.0.0.1:5556/api/v2/resources/
* for searchengine client, http://127.0.0.1:5556
* for Elasticsearch, http://127.0.0.1:9201
* After deploying the apps using the playbook, another playbook needs to be run for caching and indexing:
  * run_searchengine_index_service.yml
* If the Postgresql database server is located at the same machine which hosts the searchengine, it is needed to:
* Edit pg_hba.conf file (one of the postgresql configuration files) and add two client ips (i.e. 10.11.0.10 and 10.11.0.11)
* Reload the configuration; so the PostgreSQL accepts the connection from indexing and caching services.
* As the indexing process takes a long time, there is a playbook that enables the user to check whether it has finished:
  * check_indexing_service.yml