Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,16 @@ solid-connection 서비스의 AWS, terraform 기반 IaC 레포지토리입니다
```
solid-connection-infra/
├── config/
│ └── secrets/ # 민감한 data 관리
│ └── ...
│ ├── secrets/ # 민감한 data 관리
│ │ └── ...
│ └── side-infra/ # [side infra 관련 설정]
│ └── config.alloy
├── modules/
│ └── app_stack/ # [Prod/Stage 환경의 공통 모듈]
│ ├── scripts
│ │ └── docker_setup.sh
│ │ └── nginx_setup.sh.tftpl
│ │ └── side_infra_setup.sh.tftpl
│ ├── security_groups.tf
│ ├── ec2.tf
│ ├── rds.tf
Expand Down
2 changes: 1 addition & 1 deletion config/secrets
37 changes: 37 additions & 0 deletions config/side-infra/config.alloy.tftpl
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
// Grafana Alloy pipeline: tail Spring service log files and ship them to Loki.
// This file is a Terraform template (templatefile); the loki_ip placeholder
// below is substituted with the monitoring server's private IP at render time.

// Enable Alloy's live-debugging endpoint.
// NOTE(review): this is a debugging aid — confirm it should stay enabled here.
livedebugging {
  enabled = true
}

// Alloy's own log output settings.
logging {
  level  = "info"
  format = "logfmt"
}

// Discover the Spring service log files to tail.
local.file_match "spring_logs" {
  path_targets = [{ __path__ = "/var/log/spring/*.log" }] // service log file paths
}

// Tail the discovered files and forward each line to the labeling stage.
loki.source.file "spring_source" {
  targets    = local.file_match.spring_logs.targets // file paths discovered above
  forward_to = [loki.process.spring_labels.receiver] // hand raw lines to processing
}

// Attach static labels to every log line, then forward to the Loki writer.
loki.process "spring_labels" {
  forward_to = [loki.write.grafana_loki.receiver] // send labeled logs to Loki

  stage.static_labels {
    values = {
      service = "backend",
      env     = sys.env("ALLOY_ENV"), // environment label read from the ALLOY_ENV env var
    }
  }
}

// Push logs to the Loki instance running on the monitoring server.
loki.write "grafana_loki" {
  endpoint {
    url        = "http://${loki_ip}:3100/loki/api/v1/push"
    tenant_id  = "fake" // Loki tenant ID (single-tenant deployment, so a placeholder is used)
    batch_wait = "1s"   // max time to wait before flushing a batch
    batch_size = "1MB"  // max batch size before flushing
  }
}
15 changes: 13 additions & 2 deletions environment/prod/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ module "prod_stack" {

# 키페어 및 접속 허용
key_name = var.key_name

# 인스턴스 스펙
instance_type = var.server_instance_type
db_instance_class = var.db_instance_class
Expand All @@ -24,7 +24,7 @@ module "prod_stack" {

# RDS 식별자 설정
rds_identifier = var.rds_identifier

# DB 계정 정보
db_username = var.db_root_username
db_password = var.db_root_password
Expand All @@ -45,4 +45,15 @@ module "prod_stack" {
# S3 버킷 이름 전달
s3_default_bucket_name = var.s3_default_bucket_name
s3_upload_bucket_name = var.s3_upload_bucket_name

# ssh key 경로 전달
ssh_key_path = var.ssh_key_path

# Side Infra 관련 변수 전달
work_dir = var.work_dir
alloy_env_name = var.alloy_env_name

redis_version = var.redis_version
redis_exporter_version = var.redis_exporter_version
alloy_version = var.alloy_version
}
30 changes: 30 additions & 0 deletions environment/prod/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -103,3 +103,33 @@ variable "s3_upload_bucket_name" {
description = "Name of the upload S3 bucket"
type = string
}

# Path to the SSH private key used by the remote-exec/file provisioners
# to connect to the API server.
variable "ssh_key_path" {
  description = "Path to the SSH private key file for remote-exec"
  type        = string
}

# Working directory on the instance used by the side-infra setup script.
variable "work_dir" {
  description = "Working directory for the application"
  type        = string
}

# Environment name passed to Grafana Alloy (presumably surfaced as the
# ALLOY_ENV environment variable read by config.alloy — TODO confirm).
variable "alloy_env_name" {
  description = "Alloy Env Name"
  type        = string
}

# Docker image tag for the Redis container.
variable "redis_version" {
  description = "Docker image tag for Redis"
  type        = string
}

# Docker image tag for the Redis Exporter container.
variable "redis_exporter_version" {
  description = "Docker image tag for Redis Exporter"
  type        = string
}

# Docker image tag for the Grafana Alloy container.
variable "alloy_version" {
  description = "Docker image tag for Grafana Alloy"
  type        = string
}
17 changes: 14 additions & 3 deletions environment/stage/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@ module "stage_stack" {

# 키페어 및 접속 허용
key_name = var.key_name

# 인스턴스 스펙
instance_type = var.server_instance_type
instance_type = var.server_instance_type
db_instance_class = var.db_instance_class

# 보안 그룹 규칙
Expand All @@ -24,7 +24,7 @@ module "stage_stack" {

# RDS 식별자 설정
rds_identifier = var.rds_identifier

# DB 계정 정보
db_username = var.db_root_username
db_password = var.db_root_password
Expand All @@ -45,4 +45,15 @@ module "stage_stack" {
# S3 버킷 이름 전달
s3_default_bucket_name = var.s3_default_bucket_name
s3_upload_bucket_name = var.s3_upload_bucket_name

# ssh key 경로 전달
ssh_key_path = var.ssh_key_path

# Side Infra 관련 변수 전달
work_dir = var.work_dir
alloy_env_name = var.alloy_env_name

redis_version = var.redis_version
redis_exporter_version = var.redis_exporter_version
alloy_version = var.alloy_version
}
30 changes: 30 additions & 0 deletions environment/stage/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -103,3 +103,33 @@ variable "s3_upload_bucket_name" {
description = "Name of the upload S3 bucket"
type = string
}

# Path to the SSH private key used by the remote-exec/file provisioners
# to connect to the API server.
variable "ssh_key_path" {
  description = "Path to the SSH private key file for remote-exec"
  type        = string
}

# Working directory on the instance used by the side-infra setup script.
variable "work_dir" {
  description = "Working directory for the application"
  type        = string
}

# Environment name passed to Grafana Alloy (presumably surfaced as the
# ALLOY_ENV environment variable read by config.alloy — TODO confirm).
variable "alloy_env_name" {
  description = "Alloy Env Name"
  type        = string
}

# Docker image tag for the Redis container.
variable "redis_version" {
  description = "Docker image tag for Redis"
  type        = string
}

# Docker image tag for the Redis Exporter container.
variable "redis_exporter_version" {
  description = "Docker image tag for Redis Exporter"
  type        = string
}

# Docker image tag for the Grafana Alloy container.
variable "alloy_version" {
  description = "Docker image tag for Grafana Alloy"
  type        = string
}
116 changes: 103 additions & 13 deletions modules/app_stack/ec2.tf
Original file line number Diff line number Diff line change
@@ -1,4 +1,16 @@
# 3. CloudInit을 이용한 User Data 스크립트 구성
# Look up the monitoring EC2 instance so its private IP can be injected into
# the Alloy config template (Loki push endpoint).
# NOTE: the singular `data "aws_instance"` fails the plan if zero or more than
# one instance matches, so exactly-one semantics are enforced automatically.
data "aws_instance" "monitoring_server" {
  # Match the monitoring host by its Name tag.
  filter {
    name = "tag:Name"
    values = ["solid-connection-monitoring"]
  }

  # Only consider instances that are currently running.
  filter {
    name = "instance-state-name"
    values = ["running"]
  }
}
Comment on lines +1 to +11
Copy link

@coderabbitai coderabbitai bot Dec 28, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Add error handling for missing monitoring server.

The data source will fail the Terraform plan if no monitoring server is found or if multiple servers match the filters. Consider adding validation or documentation about this prerequisite.

🔎 Add validation check

You can add a validation step after the module that uses this data source:

# In the calling module (environment/prod or stage)
locals {
  monitoring_server_id = module.prod_stack.monitoring_server_id
}

# Add output in modules/app_stack/outputs.tf
output "monitoring_server_id" {
  value = data.aws_instance.monitoring_server.id
  description = "ID of the monitoring server"
}

Alternatively, add a precondition check in Terraform 1.2+:

 data "aws_instance" "monitoring_server" {
+  lifecycle {
+    precondition {
+      condition     = length(data.aws_instance.monitoring_server.*.id) == 1
+      error_message = "Exactly one monitoring server must exist with tag Name=solid-connection-monitoring in running state"
+    }
+  }
+
   filter {
     name   = "tag:Name"
     values = ["solid-connection-monitoring"]
   }

Committable suggestion skipped: line range outside the PR's diff.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

어차피 aws_instance라는 단수형으로 선언했기에 2개 이상이거나 0개면 error가 나! 그러니 그 부분은 신경 안써도 돼!

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Hexeong, you're absolutely right! I appreciate the clarification. The data "aws_instance" resource (singular) indeed enforces exactly-one semantics automatically, so Terraform will fail during the plan phase if zero or multiple instances match those filters. My suggested validation was redundant. Thank you for pointing that out!

(\(\
(  ·.·)
o_(")(")*
*thoughtful rabbit*

✏️ Learnings added
Learnt from: Hexeong
Repo: solid-connection/solid-connection-infra PR: 11
File: modules/app_stack/ec2.tf:1-11
Timestamp: 2025-12-29T14:32:47.078Z
Learning: In Terraform, `data "aws_instance"` (singular) automatically fails if zero or more than one instance matches the filters, so explicit validation for this case is not needed.

Note: Learnings are effective only in the context of similar code segments. To apply general coding guidelines, please configure review instructions. You can manage existing learnings in the app.


# CloudInit을 이용한 User Data 스크립트 구성
data "cloudinit_config" "app_init" {
gzip = true
base64_encode = true
Expand All @@ -9,20 +21,9 @@ data "cloudinit_config" "app_init" {
content = file("${path.module}/scripts/docker_setup.sh")
filename = "1_docker_install.sh"
}

# [Part 2] Nginx 설정 스크립트
part {
content_type = "text/x-shellscript"
content = templatefile("${path.module}/scripts/nginx_setup.sh.tftpl", {
domain_name = var.domain_name
email = var.cert_email
conf_file_name = var.nginx_conf_name
})
filename = "2_nginx_setup.sh"
}
}

# 4. API Server (EC2)
# API Server (EC2)
resource "aws_instance" "api_server" {
ami = var.ami_id
instance_type = var.instance_type
Expand Down Expand Up @@ -51,3 +52,92 @@ resource "aws_instance" "api_server" {
]
}
}

# Configuration and container execution
# [Resource 1] Detect changes to the rendered Nginx setup script and re-run it.
resource "null_resource" "update_nginx" {
  depends_on = [aws_instance.api_server]

  # Render the template exactly once and keep the result in triggers: any
  # change to the rendered content re-triggers this resource (equivalent to
  # hashing it), and the provisioner below reuses it via `self.triggers`
  # instead of rendering the same template a second time.
  triggers = {
    script = templatefile("${path.module}/scripts/nginx_setup.sh.tftpl", {
      domain_name    = var.domain_name
      email          = var.cert_email
      conf_file_name = var.nginx_conf_name
    })
  }

  # SSH connection used by both provisioners.
  connection {
    type        = "ssh"
    user        = "ubuntu"
    host        = aws_instance.api_server.public_ip
    private_key = file(var.ssh_key_path)
  }

  # Upload the already-rendered script stored in triggers.
  provisioner "file" {
    content     = self.triggers.script
    destination = "/tmp/update_nginx.sh"
  }

  provisioner "remote-exec" {
    inline = [
      "cloud-init status --wait > /dev/null", # wait for cloud-init (Docker install) to finish
      "chmod +x /tmp/update_nginx.sh",
      "echo 'Running Updated Nginx Script...'",
      "sudo /tmp/update_nginx.sh",
      "rm /tmp/update_nginx.sh"
    ]
  }
}

# [Resource 2] Detect changes to the rendered side-infra setup script and re-run it.
resource "null_resource" "update_side_infra" {
  depends_on = [aws_instance.api_server]

  # Render the setup script (including the embedded Alloy config) exactly once
  # and keep the result in triggers: any change to the rendered content —
  # template edits, image tag bumps, or a new monitoring-server IP —
  # re-triggers this resource, and the provisioner below reuses it via
  # `self.triggers` instead of rendering the same nested templates twice.
  triggers = {
    script = templatefile("${path.module}/scripts/side_infra_setup.sh.tftpl", {
      work_dir       = var.work_dir
      alloy_env_name = var.alloy_env_name
      # Alloy config rendered with the monitoring server's private IP (Loki endpoint).
      alloy_config_content = templatefile("${path.module}/../../config/side-infra/config.alloy.tftpl", {
        loki_ip = data.aws_instance.monitoring_server.private_ip
      })
      redis_version          = var.redis_version
      redis_exporter_version = var.redis_exporter_version
      alloy_version          = var.alloy_version
    })
  }

  # SSH connection used by both provisioners.
  connection {
    type        = "ssh"
    user        = "ubuntu"
    host        = aws_instance.api_server.public_ip
    private_key = file(var.ssh_key_path)
  }

  # Upload the already-rendered script stored in triggers.
  provisioner "file" {
    content     = self.triggers.script
    destination = "/tmp/update_side_infra.sh"
  }

  provisioner "remote-exec" {
    inline = [
      "cloud-init status --wait > /dev/null", # wait for cloud-init (Docker install) to finish
      "chmod +x /tmp/update_side_infra.sh",
      "echo 'Running Updated Side Infra Script...'",
      "sudo /tmp/update_side_infra.sh",
      "rm /tmp/update_side_infra.sh"
    ]
  }
}
11 changes: 1 addition & 10 deletions modules/app_stack/scripts/docker_setup.sh
Original file line number Diff line number Diff line change
@@ -1,13 +1,4 @@
wait_for_apt_lock() {
echo "Checking for apt locks..."
while fuser /var/lib/dpkg/lock >/dev/null 2>&1 || \
fuser /var/lib/apt/lists/lock >/dev/null 2>&1 || \
fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do
echo "Waiting for apt lock..."
sleep 5
done
echo "Apt lock acquired."
}
#!/bin/bash

# 1. 필수 패키지 설치
apt-get update
Expand Down
Loading