# This Tiltfile contains the deployment and build config for the Wormhole devnet.
#
# We use Buildkit cache mounts and careful layering to avoid unnecessary rebuilds - almost
# all source code changes result in small, incremental rebuilds. Dockerfiles are written such
# that, for example, changing the contract source code won't cause Solana itself to be rebuilt.
#

load("ext://namespace", "namespace_create", "namespace_inject")
load("ext://secret", "secret_yaml_generic")

# set the replica value of a StatefulSet
def set_replicas_in_statefulset(config_yaml, statefulset_name, num_replicas):
    for obj in config_yaml:
        if obj["kind"] == "StatefulSet" and obj["metadata"]["name"] == statefulset_name:
            obj["spec"]["replicas"] = num_replicas
    return config_yaml

# set the env value of all containers in all jobs
def set_env_in_jobs(config_yaml, name, value):
    for obj in config_yaml:
        if obj["kind"] == "Job":
            for container in obj["spec"]["template"]["spec"]["containers"]:
                if not "env" in container:
                    container["env"] = []
                container["env"].append({"name": name, "value": value})
    return config_yaml
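
# Usage sketch (illustrative only, not executed here): these helpers rewrite parsed k8s YAML
# in place before it is handed to Tilt, e.g.
#   objs = read_yaml_stream("devnet/node.yaml")
#   objs = set_replicas_in_statefulset(objs, "guardian", 3)    # StatefulSet "guardian" -> 3 replicas
#   objs = set_env_in_jobs(objs, "NUM_GUARDIANS", "3")         # append env var to every Job container
#   k8s_yaml(encode_yaml_stream(objs))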

allow_k8s_contexts("ci")

# Disable telemetry by default
analytics_settings(False)

# Moar updates (default is 3)
update_settings(max_parallel_updates = 10)

# Runtime configuration
config.define_bool("ci", False, "We are running in CI")
config.define_bool("manual", False, "Set TRIGGER_MODE_MANUAL by default")
config.define_string("num", False, "Number of guardian nodes to run")
config.define_string("maxWorkers", False, "Maximum number of workers for sdk-ci-tests. See https://jestjs.io/docs/cli#--maxworkersnumstring")

# You usually do not need to set this argument; it is for debugging only. If you do use a different
# namespace, note that the "wormhole" namespace is hardcoded in tests, and remember to pass the same
# argument when running "tilt down".
#
config.define_string("namespace", False, "Kubernetes namespace to use")

# When running Tilt on a server, this can be used to set the public hostname Tilt runs on
# so that service links in the UI work.
config.define_string("webHost", False, "Public hostname for port forwards")

config.define_string("guardiand_loglevel", False, "Log level for guardiand (debug, info, warn, error, dpanic, panic, fatal)")
  45. # Components
  46. config.define_bool("near", False, "Enable Near component")
  47. config.define_bool("sui", False, "Enable Sui component")
  48. config.define_bool("btc", False, "Enable BTC component")
  49. config.define_bool("aptos", False, "Enable Aptos component")
  50. config.define_bool("aztec", False, "Enable Aztec component")
  51. config.define_bool("algorand", False, "Enable Algorand component")
  52. config.define_bool("evm2", False, "Enable second Eth component")
  53. config.define_bool("solana", False, "Enable Solana component")
  54. config.define_bool("solana_watcher", False, "Enable Solana watcher on guardian")
  55. config.define_bool("pythnet", False, "Enable PythNet component")
  56. config.define_bool("terra_classic", False, "Enable Terra Classic component")
  57. config.define_bool("terra2", False, "Enable Terra 2 component")
  58. config.define_bool("ci_tests", False, "Enable tests runner component")
  59. config.define_bool("guardiand_debug", False, "Enable dlv endpoint for guardiand")
  60. config.define_bool("node_metrics", False, "Enable Prometheus & Grafana for Guardian metrics")
  61. config.define_bool("guardiand_governor", False, "Enable chain governor in guardiand")
  62. config.define_bool("wormchain", False, "Enable a wormchain node")
  63. config.define_bool("ibc_relayer", False, "Enable IBC relayer between cosmos chains")
  64. config.define_bool("redis", False, "Enable a redis instance")
  65. config.define_bool("generic_relayer", False, "Enable the generic relayer off-chain component")
  66. config.define_bool("query_server", False, "Enable cross-chain query server")
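
# Example invocations (illustrative only; the flag names come from the config.define_* calls above):
#   tilt up -- --num=2 --solana --sui       # two guardians plus the Solana and Sui components
#   tilt up -- --namespace=my-wormhole      # non-default namespace (pass the same flag to "tilt down")
#   tilt up -- --ci                         # CI mode: most components default to enabled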

cfg = config.parse()
num_guardians = int(cfg.get("num", "1"))
max_workers = cfg.get("maxWorkers", "50%")
namespace = cfg.get("namespace", "wormhole")
webHost = cfg.get("webHost", "localhost")
ci = cfg.get("ci", False)

algorand = cfg.get("algorand", ci)
near = cfg.get("near", ci)
aptos = cfg.get("aptos", ci)
aztec = cfg.get("aztec", ci)
sui = cfg.get("sui", ci)
evm2 = cfg.get("evm2", ci)
solana = cfg.get("solana", ci)
pythnet = cfg.get("pythnet", False)
solana_watcher = cfg.get("solana_watcher", solana or pythnet)
terra_classic = cfg.get("terra_classic", ci)
terra2 = cfg.get("terra2", ci)
wormchain = cfg.get("wormchain", ci)
ci_tests = cfg.get("ci_tests", ci)
guardiand_debug = cfg.get("guardiand_debug", False)
node_metrics = cfg.get("node_metrics", False)
guardiand_governor = cfg.get("guardiand_governor", False)
ibc_relayer = cfg.get("ibc_relayer", ci)
btc = cfg.get("btc", False)
redis = cfg.get('redis', ci)
generic_relayer = cfg.get("generic_relayer", ci)
query_server = cfg.get("query_server", ci)

if ci:
    guardiand_loglevel = cfg.get("guardiand_loglevel", "warn")
else:
    guardiand_loglevel = cfg.get("guardiand_loglevel", "info")

if cfg.get("manual", False):
    trigger_mode = TRIGGER_MODE_MANUAL
else:
    trigger_mode = TRIGGER_MODE_AUTO

# namespace
if not ci:
    namespace_create(namespace)

def k8s_yaml_with_ns(objects):
    return k8s_yaml(namespace_inject(objects, namespace))

docker_build(
    ref = "cli-gen",
    context = ".",
    dockerfile = "Dockerfile.cli",
)

docker_build(
    ref = "const-gen",
    context = ".",
    dockerfile = "Dockerfile.const",
    build_args={"num_guardians": '%s' % (num_guardians)},
)

# node
docker_build(
    ref = "guardiand-image",
    context = ".",
    dockerfile = "node/Dockerfile",
    target = "build",
    ignore=["./sdk/js", "./relayer"]
)

def command_with_dlv(argv):
    return [
        "/dlv",
        "--listen=0.0.0.0:2345",
        "--accept-multiclient",
        "--headless=true",
        "--api-version=2",
        "--continue=true",
        "exec",
        argv[0],
        "--",
    ] + argv[1:]
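
# For illustration only: given a container command such as ["/guardiand", "node", ...] (the actual
# command comes from devnet/node.yaml), command_with_dlv rewraps it as
#   ["/dlv", "--listen=0.0.0.0:2345", ..., "exec", "/guardiand", "--", "node", ...]
# so the same process starts under the Delve debugger, port-forwarded below as "Debugger [:2345]".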

def generate_bootstrap_peers(num_guardians, port_num):
    # Improve the chances of the guardians discovering each other in tilt by making them all bootstrap peers.
    # The devnet guardian uses deterministic P2P peer IDs based on the guardian index. The peer IDs here
    # were generated using `DeterministicP2PPrivKeyByIndex` in `node/pkg/devnet/deterministic_p2p_key.go`.
    peer_ids = [
        "12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw",
        "12D3KooWHHzSeKaY8xuZVzkLbKFfvNgPPeKhFBGrMbNzbm5akpqu",
        "12D3KooWKRyzVWW6ChFjQjK4miCty85Niy49tpPV95XdKu1BcvMA",
        "12D3KooWB1b3qZxWJanuhtseF3DmPggHCtG36KZ9ixkqHtdKH9fh",
        "12D3KooWE4qDcRrueTuRYWUdQZgcy7APZqBngVeXRt4Y6ytHizKV",
        "12D3KooWPgam4TzSVCRa4AbhxQnM9abCYR4E9hV57SN7eAjEYn1j",
        "12D3KooWM4yJB31d4hF2F9Vdwuj9WFo1qonoySyw4bVAQ9a9d21o",
        "12D3KooWCv935r3ropYhUe5yMCp9QiUoc9A6cZpYQ5x84DqEPbwb",
        "12D3KooWQfG74brcJhzpNwjPCZmcbBv8f6wxKgLSYmEDXXdPXQpH",
        "12D3KooWNEWRB7PnuZs164xaA9QWM3iZHekHyEQo5qGP5KCHHuSN",
        "12D3KooWB224kvi7vN34xJfsfW7bnv6eodxTkgo9VFA6UiaGMgRD",
        "12D3KooWCR2EoapJjoQVR4E3NLjWn818gG3XizQ92Yx6C424HL2g",
        "12D3KooWNc5rNmCJ9yvXviXaENnp7vqDQjomZwia4aA7Q3hSYkiW",
        "12D3KooWBremnqYWBDK6ctvCuhCqJAps5ZAPADu53gXhQHexrvtP",
        "12D3KooWFqdBYPrtwErMosomvD4uRtVhXQdqqZZHC3NCBZYVxr4t",
        "12D3KooW9yvKfP5HgVaLnNaxWywo3pLAEypk7wjUcpgKwLznk5gQ",
        "12D3KooWRuYVGEsecrJJhZsSoKf1UNdBVYKFCmFLNj9ucZiSQCYj",
        "12D3KooWGEcD5sW5osB6LajkHGqiGc3W8eKfYwnJVVqfujkpLWX2",
        "12D3KooWQYz2inBsgiBoqNtmEn1qeRBr9B8cdishFuBgiARcfMcY"
    ]

    bootstrap = ""
    for idx in range(num_guardians):
        if bootstrap != "":
            bootstrap += ","
        bootstrap += "/dns4/guardian-{idx}.guardian/udp/{port}/quic/p2p/{peer}".format(idx = idx, port = port_num, peer = peer_ids[idx])
    return bootstrap
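
# For example, with num_guardians = 2 and port 8999 the generated string looks like (illustration only):
#   /dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw,/dns4/guardian-1.guardian/udp/8999/quic/p2p/12D3KooWHHzSeKaY8xuZVzkLbKFfvNgPPeKhFBGrMbNzbm5akpqu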

bootstrapPeers = generate_bootstrap_peers(num_guardians, 8999)
ccqBootstrapPeers = generate_bootstrap_peers(num_guardians, 8996)

def build_node_yaml():
    node_yaml = read_yaml_stream("devnet/node.yaml")
    node_yaml_with_replicas = set_replicas_in_statefulset(node_yaml, "guardian", num_guardians)

    for obj in node_yaml_with_replicas:
        if obj["kind"] == "StatefulSet" and obj["metadata"]["name"] == "guardian":
            container = obj["spec"]["template"]["spec"]["containers"][0]
            if container["name"] != "guardiand":
                fail("container 0 is not guardiand")

            container["command"] += ["--logLevel="+guardiand_loglevel]

            if guardiand_debug:
                container["command"] = command_with_dlv(container["command"])
                print(container["command"])

            if num_guardians > 1:
                container["command"] += [
                    "--bootstrap",
                    bootstrapPeers,
                    "--ccqP2pBootstrap",
                    ccqBootstrapPeers,
                ]

            if aptos:
                container["command"] += [
                    "--aptosRPC",
                    "http://aptos:8080",
                    "--aptosAccount",
                    "de0036a9600559e295d5f6802ef6f3f802f510366e0c23912b0655d972166017",
                    "--aptosHandle",
                    "0xde0036a9600559e295d5f6802ef6f3f802f510366e0c23912b0655d972166017::state::WormholeMessageHandle",
                ]

            if aztec:
                container["command"] += [
                    "--aztecRPC",
                    "http://aztec-sandbox:8090",
                    "--aztecContract",
                    "0x240ca8722f92a439009fd185dddb4a315de26dd34c0067de2d8b9c58afd87432",
                ]

            if sui:
                container["command"] += [
                    "--suiRPC",
                    "http://sui:9000",
                    "--suiMoveEventType",
                    "0x320a40bff834b5ffa12d7f5cc2220dd733dd9e8e91c425800203d06fb2b1fee8::publish_message::WormholeMessage",
                ]

            if evm2:
                container["command"] += [
                    "--bscRPC",
                    "ws://eth-devnet2:8545",
                ]
            else:
                container["command"] += [
                    "--bscRPC",
                    "ws://eth-devnet:8545",
                ]

            if solana_watcher:
                container["command"] += [
                    "--solanaRPC",
                    "http://solana-devnet:8899",
                    "--solanaContract",
                    "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o",
                    "--solanaShimContract",
                    "EtZMZM22ViKMo4r5y4Anovs3wKQ2owUmDpjygnMMcdEX",
                ]

            if pythnet:
                container["command"] += [
                    "--pythnetRPC",
                    # "http://solana-devnet:8899",
                    "http://pythnet.rpcpool.com",
                    "--pythnetWS",
                    # "ws://solana-devnet:8900",
                    "wss://pythnet.rpcpool.com",
                    "--pythnetContract",
                    "H3fxXJ86ADW2PNuDDmZJg6mzTtPxkYCpNuQUTgmJ7AjU",
                ]

            if terra_classic:
                container["command"] += [
                    "--terraWS",
                    "ws://terra-terrad:26657/websocket",
                    "--terraLCD",
                    "http://terra-terrad:1317",
                    "--terraContract",
                    "terra14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9ssrc8au",
                ]

            if terra2:
                container["command"] += [
                    "--terra2WS",
                    "ws://terra2-terrad:26657/websocket",
                    "--terra2LCD",
                    "http://terra2-terrad:1317",
                    "--terra2Contract",
                    "terra14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9ssrc8au",
                ]

            if algorand:
                container["command"] += [
                    "--algorandAppID",
                    "1004",
                    "--algorandIndexerRPC",
                    "http://algorand:8980",
                    "--algorandIndexerToken",
                    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "--algorandAlgodRPC",
                    "http://algorand:4001",
                    "--algorandAlgodToken",
                    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                ]

            if guardiand_governor:
                container["command"] += [
                    "--chainGovernorEnabled"
                ]

            if near:
                container["command"] += [
                    "--nearRPC",
                    "http://near:3030",
                    "--nearContract",
                    "wormhole.test.near"
                ]

            if wormchain:
                container["command"] += [
                    "--wormchainURL",
                    "wormchain:9090",
                    "--accountantWS",
                    "http://wormchain:26657",
                    "--accountantContract",
                    "wormhole14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9srrg465",
                    "--accountantKeyPath",
                    "/tmp/mounted-keys/wormchain/accountantKey",
                    "--accountantKeyPassPhrase",
                    "test0000",
                    "--accountantCheckEnabled",
                    "true",
                    "--accountantNttContract",
                    "wormhole17p9rzwnnfxcjp32un9ug7yhhzgtkhvl9jfksztgw5uh69wac2pgshdnj3k",
                    "--accountantNttKeyPath",
                    "/tmp/mounted-keys/wormchain/accountantNttKey",
                    "--accountantNttKeyPassPhrase",
                    "test0000",
                    "--ibcContract",
                    "wormhole1nc5tatafv6eyq7llkr2gv50ff9e22mnf70qgjlv737ktmt4eswrq0kdhcj",
                    "--ibcWS",
                    "ws://wormchain:26657/websocket",
                    "--ibcLCD",
                    "http://wormchain:1317",
                    "--gatewayRelayerContract",
                    "wormhole1wn625s4jcmvk0szpl85rj5azkfc6suyvf75q6vrddscjdphtve8sca0pvl",
                    "--gatewayRelayerKeyPath",
                    "/tmp/mounted-keys/wormchain/gwrelayerKey",
                    "--gatewayRelayerKeyPassPhrase",
                    "test0000",
                    "--gatewayContract",
                    "wormhole1ghd753shjuwexxywmgs4xz7x2q732vcnkm6h2pyv9s6ah3hylvrqtm7t3h",
                    "--gatewayWS",
                    "ws://wormchain:26657/websocket",
                    "--gatewayLCD",
                    "http://wormchain:1317"
                ]

    return encode_yaml_stream(node_yaml_with_replicas)

k8s_yaml_with_ns(build_node_yaml())

guardian_resource_deps = ["eth-devnet"]
if evm2:
    guardian_resource_deps = guardian_resource_deps + ["eth-devnet2"]
if solana_watcher:
    guardian_resource_deps = guardian_resource_deps + ["solana-devnet"]
if near:
    guardian_resource_deps = guardian_resource_deps + ["near"]
if terra_classic:
    guardian_resource_deps = guardian_resource_deps + ["terra-terrad"]
if terra2:
    guardian_resource_deps = guardian_resource_deps + ["terra2-terrad"]
if algorand:
    guardian_resource_deps = guardian_resource_deps + ["algorand"]
if aptos:
    guardian_resource_deps = guardian_resource_deps + ["aptos"]
if aztec:
    guardian_resource_deps = guardian_resource_deps + ["aztec-sandbox"]
if wormchain:
    guardian_resource_deps = guardian_resource_deps + ["wormchain", "wormchain-deploy"]
if sui:
    guardian_resource_deps = guardian_resource_deps + ["sui"]

k8s_resource(
    "guardian",
    resource_deps = guardian_resource_deps,
    port_forwards = [
        port_forward(6060, name = "Debug/Status Server [:6060]", host = webHost),
        port_forward(7070, name = "Public gRPC [:7070]", host = webHost),
        port_forward(7071, name = "Public REST [:7071]", host = webHost),
        port_forward(2345, name = "Debugger [:2345]", host = webHost),
    ],
    labels = ["guardian"],
    trigger_mode = trigger_mode,
)

# guardian set update - triggered by "tilt args" changes
if num_guardians >= 2 and ci == False:
    local_resource(
        name = "guardian-set-update",
        resource_deps = guardian_resource_deps + ["guardian"],
        deps = ["scripts/send-vaa.sh", "clients/eth"],
        cmd = './scripts/update-guardian-set.sh %s %s %s' % (num_guardians, webHost, namespace),
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )

# grafana + prometheus for node metrics
if node_metrics:
    dashboard = read_json("dashboards/Wormhole.json")

    dashboard_yaml = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "name": "grafana-dashboards-json"
        },
        "data": {
            "wormhole.json": encode_json(dashboard)
        }
    }
    k8s_yaml_with_ns(encode_yaml(dashboard_yaml))

    k8s_yaml_with_ns("devnet/node-metrics.yaml")

    k8s_resource(
        "prometheus-server",
        resource_deps = ["guardian"],
        port_forwards = [
            port_forward(9099, name = "Prometheus [:9099]", host = webHost),
        ],
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "grafana",
        resource_deps = ["prometheus-server"],
        port_forwards = [
            port_forward(3033, name = "Grafana UI [:3033]", host = webHost),
        ],
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )

# spy
k8s_yaml_with_ns("devnet/spy.yaml")

k8s_resource(
    "spy",
    resource_deps = ["guardian"],
    port_forwards = [
        port_forward(6061, container_port = 6060, name = "Debug/Status Server [:6061]", host = webHost),
        port_forward(7072, name = "Spy gRPC [:7072]", host = webHost),
    ],
    labels = ["guardian"],
    trigger_mode = trigger_mode,
)

if solana or pythnet:
    # solana client cli (used for devnet setup)
    docker_build(
        ref = "bridge-client",
        context = ".",
        only = ["./proto", "./solana", "./clients"],
        dockerfile = "solana/Dockerfile.client",
        # Ignore target folders from local (non-container) development.
        ignore = ["./solana/*/target", "./solana/tests"],
    )

    # solana smart contract
    docker_build(
        ref = "solana-contract",
        context = "solana",
        dockerfile = "solana/Dockerfile",
        target = "builder",
        ignore = ["./solana/*/target", "./solana/tests"],
        build_args = {"BRIDGE_ADDRESS": "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o", "CHAIN_ID": "1"},
    )

    # solana local devnet
    docker_build(
        ref = "solana-test-validator",
        context = "solana",
        dockerfile = "solana/Dockerfile.test-validator",
    )

    k8s_yaml_with_ns("devnet/solana-devnet.yaml")

    k8s_resource(
        "solana-devnet",
        port_forwards = [
            port_forward(8899, name = "Solana RPC [:8899]", host = webHost),
            port_forward(8900, name = "Solana WS [:8900]", host = webHost),
        ],
        labels = ["solana"],
        trigger_mode = trigger_mode,
    )

# eth devnet
docker_build(
    ref = "eth-node",
    context = ".",
    only = ["./ethereum", "./relayer/ethereum"],
    dockerfile = "./ethereum/Dockerfile",
    # ignore local node_modules (in case they're present)
    ignore = ["./ethereum/node_modules","./relayer/ethereum/node_modules"],
    build_args = {"num_guardians": str(num_guardians), "dev": str(not ci)},

    # sync external scripts for incremental development
    # (everything else needs to be restarted from scratch for determinism)
    #
    # This relies on --update-mode=exec to work properly with a non-root user.
    # https://github.com/tilt-dev/tilt/issues/3708
    live_update = [
        sync("./ethereum/src", "/home/node/app/src"),
    ],
)

if redis or generic_relayer:
    docker_build(
        ref = "redis",
        context = ".",
        only = ["./third_party"],
        dockerfile = "third_party/redis/Dockerfile",
    )

if redis:
    k8s_resource(
        "redis",
        port_forwards = [
            port_forward(6379, name = "Redis Default [:6379]", host = webHost),
        ],
        labels = ["redis"],
        trigger_mode = trigger_mode,
    )
    k8s_yaml_with_ns("devnet/redis.yaml")

if generic_relayer:
    k8s_resource(
        "redis-relayer",
        port_forwards = [
            port_forward(6378, name = "Generic Relayer Redis [:6378]", host = webHost),
        ],
        labels = ["redis-relayer"],
        trigger_mode = trigger_mode,
    )
    k8s_yaml_with_ns("devnet/redis-relayer.yaml")

if generic_relayer:
    k8s_resource(
        "relayer-engine",
        resource_deps = ["guardian", "redis-relayer", "spy"],
        port_forwards = [
            port_forward(3003, container_port=3000, name = "Bullmq UI [:3003]", host = webHost),
        ],
        labels = ["relayer-engine"],
        trigger_mode = trigger_mode,
    )
    docker_build(
        ref = "relayer-engine",
        context = ".",
        only = ["./relayer/generic_relayer", "./relayer/ethereum/ts-scripts/relayer/config"],
        dockerfile = "relayer/generic_relayer/relayer-engine-v2/Dockerfile",
        build_args = {"dev": str(not ci)}
    )
    k8s_yaml_with_ns("devnet/relayer-engine.yaml")

k8s_yaml_with_ns("devnet/eth-devnet.yaml")

k8s_resource(
    "eth-devnet",
    port_forwards = [
        port_forward(8545, name = "Anvil RPC [:8545]", host = webHost),
    ],
    labels = ["evm"],
    trigger_mode = trigger_mode,
)

if evm2:
    k8s_yaml_with_ns("devnet/eth-devnet2.yaml")

    k8s_resource(
        "eth-devnet2",
        port_forwards = [
            port_forward(8546, 8545, name = "Anvil RPC [:8546]", host = webHost),
        ],
        labels = ["evm"],
        trigger_mode = trigger_mode,
    )

# Note that ci_tests requires other resources in order to build properly:
# - eth-devnet -- required by: accountant_tests, ntt_accountant_tests, tx-verifier
# - eth-devnet2 -- required by: accountant_tests, ntt_accountant_tests
# - wormchain -- required by: accountant_tests, ntt_accountant_tests
# - solana -- required by: spydk-ci-tests
if ci_tests:
    docker_build(
        ref = "sdk-test-image",
        context = ".",
        dockerfile = "testing/Dockerfile.sdk.test",
        only = [],
        live_update = [
            sync("./sdk/js/src", "/app/sdk/js/src"),
            sync("./testing", "/app/testing"),
        ],
    )
    docker_build(
        ref = "spydk-test-image",
        context = ".",
        dockerfile = "testing/Dockerfile.spydk.test",
        only = [],
        live_update = [
            sync("./spydk/js/src", "/app/spydk/js/src"),
            sync("./testing", "/app/testing"),
        ],
    )
    docker_build(
        ref = "query-sdk-test-image",
        context = ".",
        dockerfile = "testing/Dockerfile.querysdk.test",
        only = [],
        live_update = [
            sync("./sdk/js/src", "/app/sdk/js-query/src"),
            sync("./testing", "/app/testing"),
        ],
    )

    docker_build(
        ref = "tx-verifier-evm",
        context = "./devnet/tx-verifier/",
        dockerfile = "./devnet/tx-verifier/Dockerfile.tx-verifier-evm"
    )
    k8s_yaml_with_ns("devnet/tx-verifier-evm.yaml")

    if sui:
        docker_build(
            ref = "tx-verifier-sui",
            context = "./devnet/tx-verifier/",
            dockerfile = "./devnet/tx-verifier/Dockerfile.tx-verifier-sui"
        )
        k8s_yaml_with_ns("devnet/tx-verifier-sui.yaml")

    k8s_yaml_with_ns(
        encode_yaml_stream(
            set_env_in_jobs(
                set_env_in_jobs(
                    set_env_in_jobs(read_yaml_stream("devnet/tests.yaml"), "NUM_GUARDIANS", str(num_guardians)),
                    "BOOTSTRAP_PEERS", str(ccqBootstrapPeers)),
                "MAX_WORKERS", max_workers))
    )

    # separate resources to parallelize docker builds
    k8s_resource(
        "sdk-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # testing/sdk.sh handles waiting for spy, not having deps gets the build earlier
    )
    k8s_resource(
        "spydk-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # testing/spydk.sh handles waiting for spy, not having deps gets the build earlier
    )
    k8s_resource(
        "accountant-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # uses devnet-consts.json, but wormchain/contracts/tools/test_accountant.sh handles waiting for guardian, not having deps gets the build earlier
    )
    k8s_resource(
        "ntt-accountant-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # uses devnet-consts.json, but wormchain/contracts/tools/test_ntt_accountant.sh handles waiting for guardian, not having deps gets the build earlier
    )
    k8s_resource(
        "query-sdk-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # testing/querysdk.sh handles waiting for query-server, not having deps gets the build earlier
    )
    # launches Transfer Verifier binary and sets up monitoring script
    k8s_resource(
        "tx-verifier-evm",
        labels = ["tx-verifier"],
        trigger_mode = trigger_mode,
        resource_deps = ["eth-devnet"],
    )
    k8s_resource(
        "custom-consistency-level-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # uses devnet-consts.json, but testing/contract-integrations/custom_consistency_level/test_custom_consistency_level.sh handles waiting for guardian, not having deps gets the build earlier
    )

    if sui:
        k8s_resource(
            "tx-verifier-sui",
            labels = ["tx-verifier"],
            trigger_mode = trigger_mode,
            resource_deps = ["sui"]
        )

if terra_classic:
    docker_build(
        ref = "terra-image",
        context = "./terra/devnet",
        dockerfile = "terra/devnet/Dockerfile",
        platform = "linux/amd64",
    )

    docker_build(
        ref = "terra-contracts",
        context = "./terra",
        dockerfile = "./terra/Dockerfile",
        platform = "linux/amd64",
    )

    k8s_yaml_with_ns("devnet/terra-devnet.yaml")

    k8s_resource(
        "terra-terrad",
        port_forwards = [
            port_forward(26657, name = "Terra RPC [:26657]", host = webHost),
            port_forward(1317, name = "Terra LCD [:1317]", host = webHost),
        ],
        labels = ["terra"],
        trigger_mode = trigger_mode,
    )

if terra2 or wormchain:
    docker_build(
        ref = "cosmwasm_artifacts",
        context = ".",
        dockerfile = "./cosmwasm/Dockerfile",
        target = "artifacts",
        platform = "linux/amd64",
    )

if terra2:
    docker_build(
        ref = "terra2-image",
        context = "./cosmwasm/deployment/terra2/devnet",
        dockerfile = "./cosmwasm/deployment/terra2/devnet/Dockerfile",
        platform = "linux/amd64",
    )

    docker_build(
        ref = "terra2-deploy",
        context = "./cosmwasm/deployment/terra2",
        dockerfile = "./cosmwasm/Dockerfile.deploy",
    )

    k8s_yaml_with_ns("devnet/terra2-devnet.yaml")

    k8s_resource(
        "terra2-terrad",
        port_forwards = [
            port_forward(26658, container_port = 26657, name = "Terra 2 RPC [:26658]", host = webHost),
            port_forward(1318, container_port = 1317, name = "Terra 2 LCD [:1318]", host = webHost),
        ],
        labels = ["terra2"],
        trigger_mode = trigger_mode,
    )

if algorand:
    k8s_yaml_with_ns("devnet/algorand-devnet.yaml")

    docker_build(
        ref = "algorand-algod",
        context = "algorand/sandbox-algorand",
        dockerfile = "algorand/sandbox-algorand/images/algod/Dockerfile"
    )

    docker_build(
        ref = "algorand-indexer",
        context = "algorand/sandbox-algorand",
        dockerfile = "algorand/sandbox-algorand/images/indexer/Dockerfile"
    )

    docker_build(
        ref = "algorand-contracts",
        context = "algorand",
        dockerfile = "algorand/Dockerfile",
        ignore = ["algorand/test/*.*"]
    )

    k8s_resource(
        "algorand",
        port_forwards = [
            port_forward(4001, name = "Algod [:4001]", host = webHost),
            port_forward(4002, name = "KMD [:4002]", host = webHost),
            port_forward(8980, name = "Indexer [:8980]", host = webHost),
        ],
        labels = ["algorand"],
        trigger_mode = trigger_mode,
    )

if sui:
    k8s_yaml_with_ns("devnet/sui-devnet.yaml")

    docker_build(
        ref = "sui-node",
        target = "sui",
        context = ".",
        dockerfile = "sui/Dockerfile",
        ignore = ["./sui/sui.log*", "sui/sui.log*", "sui.log.*"],
        only = ["./sui"],
    )

    k8s_resource(
        "sui",
        port_forwards = [
            port_forward(9000, 9000, name = "RPC [:9000]", host = webHost),
            port_forward(9184, name = "Prometheus [:9184]", host = webHost),
        ],
        labels = ["sui"],
        trigger_mode = trigger_mode,
    )

if near:
    k8s_yaml_with_ns("devnet/near-devnet.yaml")

    docker_build(
        ref = "near-node",
        context = "near",
        dockerfile = "near/Dockerfile",
        only = ["Dockerfile", "node_builder.sh", "start_node.sh", "README.md"],
    )

    docker_build(
        ref = "near-deploy",
        context = "near",
        dockerfile = "near/Dockerfile.deploy",
        ignore = ["./test"]
    )

    k8s_resource(
        "near",
        port_forwards = [
            port_forward(3030, name = "Node [:3030]", host = webHost),
            port_forward(3031, name = "webserver [:3031]", host = webHost),
        ],
        labels = ["near"],
        trigger_mode = trigger_mode,
    )

if wormchain:
    docker_build(
        ref = "wormchaind-image",
        context = ".",
        dockerfile = "./wormchain/Dockerfile",
        platform = "linux/amd64",
        build_args = {"num_guardians": str(num_guardians)},
        only = [],
        ignore = ["./wormchain/testing", "./wormchain/ts-sdk", "./wormchain/design", "./wormchain/vue", "./wormchain/build/wormchaind"],
    )

    # docker_build(
    #     ref = "vue-export",
    #     context = ".",
    #     dockerfile = "./wormchain/Dockerfile.proto",
    #     target = "vue-export",
    # )

    docker_build(
        ref = "wormchain-deploy",
        context = "./wormchain",
        dockerfile = "./wormchain/Dockerfile.deploy",
    )

    def build_wormchain_yaml(yaml_path, num_instances):
        wormchain_yaml = read_yaml_stream(yaml_path)

        # set the number of replicas in the StatefulSet to be num_instances
        wormchain_set = set_replicas_in_statefulset(wormchain_yaml, "wormchain", num_instances)

        # add a Service for each wormchain instance
        services = []
        for obj in wormchain_set:
            if obj["kind"] == "Service" and obj["metadata"]["name"] == "wormchain-0":
                # make a Service for each replica so we can resolve it by name from other pods.
                # copy wormchain-0's Service then set the name and selector for the instance.
                for instance_num in list(range(1, num_instances)):
                    instance_name = 'wormchain-%s' % (instance_num)

                    # Copy the Service's properties to a new dict, by value, three levels deep.
                    # tl;dr - if the value is a dict, use a comprehension to copy it immutably.
                    service = { k: ({ k2: ({ k3:v3
                        for (k3,v3) in v2.items()} if type(v2) == "dict" else v2)
                        for (k2,v2) in v.items()} if type(v) == "dict" else v)
                        for (k,v) in obj.items()}

                    # add the name we want to be able to resolve via k8s DNS
                    service["metadata"]["name"] = instance_name

                    # add the name of the pod the service should connect to
                    service["spec"]["selector"] = { "statefulset.kubernetes.io/pod-name": instance_name }

                    services.append(service)

        return encode_yaml_stream(wormchain_set + services)
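
    # Sketch of the result (illustration only): with num_instances = 3 the stream gains two extra
    # Services, "wormchain-1" and "wormchain-2", each selecting its pod via
    #   spec.selector = {"statefulset.kubernetes.io/pod-name": "wormchain-<n>"}
    # so every replica is reachable by its own DNS name from other pods.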

    wormchain_path = "devnet/wormchain.yaml"
    if num_guardians >= 2:
        # update wormchain's k8s config to spin up multiple instances
        k8s_yaml_with_ns(build_wormchain_yaml(wormchain_path, num_guardians))
    else:
        k8s_yaml_with_ns(wormchain_path)

    k8s_resource(
        "wormchain",
        port_forwards = [
            port_forward(1319, container_port = 1317, name = "REST [:1319]", host = webHost),
            port_forward(9090, container_port = 9090, name = "GRPC", host = webHost),
            port_forward(26659, container_port = 26657, name = "TENDERMINT [:26659]", host = webHost)
        ],
        labels = ["wormchain"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "wormchain-deploy",
        resource_deps = ["wormchain"],
        labels = ["wormchain"],
        trigger_mode = trigger_mode,
    )

if ibc_relayer:
    docker_build(
        ref = "ibc-relayer-image",
        context = ".",
        dockerfile = "./wormchain/ibc-relayer/Dockerfile",
        only = []
    )

    k8s_yaml_with_ns("devnet/ibc-relayer.yaml")

    k8s_resource(
        "ibc-relayer",
        port_forwards = [
            port_forward(7597, name = "HTTPDEBUG [:7597]", host = webHost),
        ],
        resource_deps = ["wormchain-deploy", "terra2-terrad"],
        labels = ["ibc-relayer"],
        trigger_mode = trigger_mode,
    )

if btc:
    k8s_yaml_with_ns("devnet/btc-localnet.yaml")

    docker_build(
        ref = "btc-node",
        context = "bitcoin",
        dockerfile = "bitcoin/Dockerfile",
        target = "bitcoin-build",
    )

    k8s_resource(
        "btc",
        port_forwards = [
            port_forward(18556, name = "RPC [:18556]", host = webHost),
        ],
        labels = ["btc"],
        trigger_mode = trigger_mode,
    )

if aptos:
    k8s_yaml_with_ns("devnet/aptos-localnet.yaml")

    docker_build(
        ref = "aptos-node",
        context = "aptos",
        dockerfile = "aptos/Dockerfile",
        target = "aptos",
    )

    k8s_resource(
        "aptos",
        port_forwards = [
            port_forward(8080, name = "RPC [:8080]", host = webHost),
            port_forward(6181, name = "FullNode [:6181]", host = webHost),
            port_forward(8081, name = "Faucet [:8081]", host = webHost),
        ],
        labels = ["aptos"],
        trigger_mode = trigger_mode,
    )

if aztec:
    k8s_yaml_with_ns("devnet/aztec-devnet.yaml")

    k8s_resource(
        "aztec-sandbox",
        port_forwards = [
            port_forward(8090, name = "RPC [:8090]", host = webHost)
        ],
        labels = ["aztec-sandbox"],
        trigger_mode = trigger_mode,
    )

def build_query_server_yaml():
    qs_yaml = read_yaml_stream("devnet/query-server.yaml")

    for obj in qs_yaml:
        if obj["kind"] == "StatefulSet" and obj["metadata"]["name"] == "query-server":
            container = obj["spec"]["template"]["spec"]["containers"][0]
            container["command"] += ["--bootstrap="+ccqBootstrapPeers]

    return encode_yaml_stream(qs_yaml)

if query_server:
    k8s_yaml_with_ns(build_query_server_yaml())

    k8s_resource(
        "query-server",
        resource_deps = ["guardian"],
        port_forwards = [
            port_forward(6069, name = "REST [:6069]", host = webHost),
            port_forward(6068, name = "Status [:6068]", host = webHost)
        ],
        labels = ["query-server"],
        trigger_mode = trigger_mode
    )