#!/bin/bash
#shellcheck disable=SC2034
export HAB_ORIGIN=${HAB_ORIGIN:-chef}
# Bring studio-common to life
RECOMMENDED_HAB_VERSION="1.6.1205"
GOLANGCILINTVERSION=1.60.1
# TODO(ssd) 2019-11-19: https://github.com/habitat-sh/habitat/issues/7219
unset SSL_CERT_FILE
# TODO (tc) Let's get rid of this hard-coded config long-term.
# Opensearch
# TODO (tc) This should be https once certs are added, at which point
# the HTTP calls in the mocha tests that talk to ES in ingest-service
# will need to be updated to pass certs along.
export ES_VERSION=5.6.4
export OPENSEARCH_URL="http://localhost:10144"
export POSTGRESQL_URL="127.0.0.1:5432"
# Ingest
export INGEST_URL="https://localhost:10122"
export INGEST_HOST="localhost"
export INGEST_PORT=10122
# Automate Gateway service
export GATEWAY_URL="https://localhost:2000"
export GATEWAY_HOST="localhost"
export GATEWAY_PORT=2000
# Compiling with cgo requires gcc to be in our path. We don't typically
# use cgo, so we set CGO_ENABLED=0 by default in the studio.
export CGO_ENABLED=0
export GOPROXY="https://proxy.golang.org,direct"
export GOSUMDB="sum.golang.org"
# Go >= 1.13 will assume this but we have tools like protoc extensions that
# look for it.
export GO111MODULE=on
# Specify where to put 'go installed' binaries
setup_gobin() {
local vendor_hash
vendor_hash=$(cat \
/src/go.sum \
/src/components/automate-grpc/protoc-gen-policy/policy/policy.go \
/src/api/external/annotations/iam/policy.proto \
| sha256sum | cut -c1-16)
export GOBIN_BASE="/src/bin"
GOBIN="${GOBIN_BASE}/${vendor_hash}"
export GOBIN
mkdir -p "$GOBIN"
}
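# Illustrative note (the hash value below is hypothetical): GOBIN ends up as
# ${GOBIN_BASE}/<first 16 hex chars of sha256 over go.sum and the protoc
# policy sources>, e.g.
#   GOBIN=/src/bin/3fa1b2c4d5e6f708
# Changing any of those inputs produces a new directory; stale ones are
# reported later by log_old_bindirs.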
setup_gobin
# Make 'go installed' binaries available in the PATH
export PATH=$PATH:$GOBIN
# Disable vendor mode
export GOFLAGS=""
# Delve Server port for remote debugging
export GO_DEBUG_PORT=2345
# Depot channel for Chef micro-services
export CHEF_CHANNEL=dev
# Flag that allows us to detect when we are in dev mode inside any a2 component
export CHEF_DEV_ENVIRONMENT=true
# minimal (fast) hart compression level for dev
export HAB_HART_COMPRESSION_LEVEL=0
mkdir -p /tmp/docs /tmp/aliases
# Source all the files available in the .studio directory of the /src directory
# Only if the '.studio/' directory exists and it is not empty
if ls -A /src/.studio &>/dev/null; then
for file in /src/.studio/*; do
# shellcheck disable=SC1090
source "$file"
done
source_msg="$source_msg and your local .studio folder";
fi
# Install busybox-static so script evaluations (e.g. #!/bin/bash) will work
# This ensures normal linux studios work like Docker studios
log_line "Install busybox-static & bash hart packages"
hab pkg install core/busybox-static >/dev/null
hab pkg binlink -d /bin core/busybox-static >/dev/null
hab pkg install core/bash >/dev/null
hab pkg binlink -d /bin/ -f core/bash >/dev/null
getting_started << GETTING_STARTED
Welcome to the Habitat-based development environment for the Automate mono-repo!
===== Deploy Automate =====
To get started (when you don't need local UI development), use this to start the deployment
service, download the latest 'dev' images for each service, and spin up Automate.
# start_all_services
If you need to develop against the UI:
# build components/automate-ui-devproxy/
# start_automate_ui_background
# start_all_services
See ./dev-docs/ui-development.md for additional information.
===== Rebuild Components and Hot-load the Results =====
After Automate is running, you can rebuild an entire component's hab package from source,
then HOT LOAD IT INTO YOUR DEPLOY, replacing the dev image that was there previously.
(Use 'build' instead of 'rebuild' if you want to check dependencies also.)
# rebuild components/<COMPONENT>
===== Rebuild just the Go Binary =====
If you have Go changes but no hab package changes, you can rebuild just the Go binary for your component.
This works for both hab packages you've already built AND packages initially downloaded from the chef depot.
This is slightly faster than 'rebuild' described above, but 'hab sup status' will not show a local origin
like 'rebuild' does (so you won't be able to tell at a glance which components you have built locally).
# go_update_component COMPONENT_NAME
===== Helpful Commands =====
Get a fully-permissioned token with 'get_admin_token'. It's also idempotent,
so you'll get the same token every time after the first run and can do things like:
# curl --insecure -H "api-token: \$(get_admin_token)" https://localhost/api/v0/version
Load various data sets with 'chef_load_actions' or 'chef_load_nodes'.
For a complete walkthrough of the dev environment, see ./README.md and ./dev-docs/DEV_ENVIRONMENT.md.
Also try 'describe' at the hab studio prompt for a summary of available commands.
GETTING_STARTED
# Memory check. Because we always forget to change the Docker preferences
# when we re-install it
total_memory_kb=$(grep MemTotal /proc/meminfo | grep -o -E '[[:digit:]]+')
# 8 GB of RAM shows up here as roughly 8164340 kB; we subtract a few MB so we
# can do a simple less-than comparison, and to account for the fact that
# MemTotal is the physical RAM reported by the BIOS minus the kernel binary
# code and some other reserved regions.
if (( total_memory_kb < 8000000 )); then
warn "!!!"
warn "This system has less than 8Gb of RAM. You will not be able to run a full Automate deployment."
warn "!!!"
fi
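# For reference, the /proc/meminfo line parsed above looks like (the value
# shown is illustrative):
#   MemTotal:        8164340 kB
# so total_memory_kb holds the MemTotal value in kilobytes.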
document "start_all_services" <<DOC
Simple wrapper for 'start_deployment_service' and 'chef-automate dev deployinate'.
Also applies a license (if present) and creates IAM dev users.
DOC
start_all_services() {
start_deployment_service
chef-automate dev deployinate
if [[ -f "/src/dev/license.jwt" ]]; then
chef-automate license apply "/src/dev/license.jwt"
fi
chef-automate dev create-iam-dev-users
}
document "get_admin_token" <<DOC
This will idempotently generate an API token that has universal access for all your dev / curl needs.
DOC
get_admin_token() {
check_if_deployinate_started || return 1
if [ -f /tmp/admin_token ]; then
cat /tmp/admin_token
else
# note: we don't have to suppress stderr; command substitution like $(get_admin_token) won't capture it.
date +%s | xargs -I % chef-automate iam token create admin-token-% --admin >/tmp/admin_token || return 1
cat /tmp/admin_token
fi
}
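# Illustrative usage (mirrors the curl example in the welcome text above):
#   curl --insecure -H "api-token: $(get_admin_token)" https://localhost/api/v0/version
# Repeated calls return the same cached token from /tmp/admin_token.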
document "check_if_deployinate_started" <<DOC
Returns 0 if deployinate is up, or 1 plus a relevant error message if not.
DOC
check_if_deployinate_started() {
if ! type chef-automate > /dev/null 2>&1; then
error "The deploy service has not been installed."
log_line "Run '${GREEN}start_deployment_service${NC}' to consume the dev channel hart or '${GREEN}build components/automate-deployment${NC}' to build from source."
return 1
fi
if ! chef-automate status > /dev/null 2>&1; then
error "The current status of your deployment is unhealthy."
log_line "If you have yet to do so, run '${GREEN}chef-automate dev deployinate${NC}' to deploy Automate."
log_line "If you have already deployed, there is an issue with your deploy."
log_line "You can check the logs with '${YELLOW}sl${NC}' and the status with '${YELLOW}chef-automate status${NC}'."
return 1
fi
return 0
}
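# Illustrative usage (my_helper is a hypothetical name): helpers that need a
# running deployment can guard themselves the same way get_admin_token does:
#   my_helper() { check_if_deployinate_started || return 1; ...; }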
# Setup ~/.netrc configuration in the studio
#
# The automate-ui has a dependency that is a private GitHub repository.
# At this time, NodeJS requires that you use either git+ssh or https with a .netrc.
# We are using a .netrc.
#
# Requirements:
# => Habitat 0.54.0 or greater installed
# => Specify `HAB_STUDIO_SECRET_GITHUB_TOKEN` in your shell environment
# ```
# export HAB_STUDIO_SECRET_GITHUB_TOKEN=secret
# ```
document "generate_netrc_config" <<DOC
Create a .netrc file that can be used to authenticate against GitHub.
Some projects require access to private GitHub repositories. The recommended
pattern is for projects to use the git+https protocol in conjunction with a
.netrc file.
Requirements:
=> Habitat 0.54.0 or greater installed
=> Specify 'HAB_STUDIO_SECRET_GITHUB_TOKEN' in your shell environment (outside the studio)
To learn more about .netrc files, you can check out the following documentation:
https://www.gnu.org/software/inetutils/manual/html_node/The-_002enetrc-file.html
DOC
generate_netrc_config() {
if [[ -z "$GITHUB_TOKEN" ]]; then
warn "Unable to configure ~/.netrc in the studio."
warn ""
warn "Missing the environment variable: HAB_STUDIO_SECRET_GITHUB_TOKEN"
warn "Without this variable, you will be unable to access private GitHub repositories."
warn ""
warn "Verify that you have this variable exported before entering the studio:"
warn "export HAB_STUDIO_SECRET_GITHUB_TOKEN=[your-secret-token]"
else
echo -e "machine github.com\\n login $GITHUB_TOKEN" >> ~/.netrc
chmod 0600 ~/.netrc
fi
}
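# Illustrative result (the token value is a placeholder): after this runs,
# ~/.netrc contains an entry along the lines of
#   machine github.com
#    login <your-secret-token>
# which git and the NodeJS tooling can use for https access to private repositories.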
generate_netrc_config
# Prepare the environment to run our services
prepare_system() {
# These are needed for opensearch
mount --bind / / > /dev/null
install_if_missing core/busybox-static sysctl > /dev/null
sysctl -w net.ipv6.conf.all.disable_ipv6=1 > /dev/null
sysctl -w vm.max_map_count=262144 > /dev/null
# Workaround to start ES in CI systems
ulimit -n 65536 > /dev/null
# Set DEVPROXY_URL for automate-ui-devproxy to either localhost or host.docker.internal depending
# on if we are in a Vagrant-based dev env or a Docker-based dev env, respectively.
if grep docker /proc/1/cgroup > /dev/null 2>&1; then
export DEVPROXY_URL="host.docker.internal"
else
export DEVPROXY_URL="localhost"
fi
}
prepare_system
log_old_bindirs() {
readarray -t old_bindirs < <(find "$GOBIN_BASE" -maxdepth 1 -mindepth 1 -type d -not -path "$GOBIN")
if [ ${#old_bindirs[@]} -gt 0 ]; then
warn "The following bin directories are out-dated and can be cleaned up:"
for old_dir in "${old_bindirs[@]}"; do
warn " $old_dir"
done
fi
}
log_old_bindirs
# Saves the in memory bash history to a file
save_history() {
history -a /src/.bash_history
}
# if .studiorc is being sourced from an already running studio, don't reset bash
# history -- this is achieved by saving the current history before it is re-read
save_history
# Load the bash history from a file
history -r /src/.bash_history
cleanup() {
save_history
umount /
}
# When exiting the studio save the bash history to a file
trap cleanup EXIT