Add cuisine subcategory filter, fix remap logic, and add OKE deployment manifests

- Add 파인다이닝/코스 cuisine type to 한식/일식/중식/양식 categories
- Change cuisine filter from flat list to grouped optgroup with subcategories
- Fix remap-foods/remap-cuisine: add jdbcType=CLOB, fix CLOB LISTAGG,
  improve retry logic (3 attempts, batch size 5), add error logging
- Add OKE deployment: Dockerfiles, K8s manifests, deploy.sh, deployment guide
- Add Next.js standalone output for Docker builds

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
joungmin
2026-03-09 22:58:09 +09:00
parent 69e1882c2b
commit ff4e8d742d
18 changed files with 853 additions and 41 deletions

3
.gitignore vendored
View File

@@ -10,3 +10,6 @@ node_modules/
# Java backend
backend-java/build/
backend-java/.gradle/
# K8s secrets (never commit)
k8s/secrets.yaml

16
backend-java/Dockerfile Normal file
View File

@@ -0,0 +1,16 @@
# ── Build stage ──
# Copy only the Gradle wrapper + build files first so the dependency
# resolution layer is cached independently of source changes.
FROM eclipse-temurin:21-jdk AS build
WORKDIR /app
COPY gradlew settings.gradle build.gradle ./
COPY gradle/ gradle/
# Warm the Gradle dependency cache; `|| true` because this pre-fetch is
# best-effort and may fail before the full source tree is copied.
RUN chmod +x gradlew && ./gradlew dependencies --no-daemon || true
COPY src/ src/
RUN ./gradlew bootJar -x test --no-daemon
# ── Runtime stage ──
# JRE-only base keeps the runtime image smaller than the JDK build image.
FROM eclipse-temurin:21-jre
WORKDIR /app
COPY --from=build /app/build/libs/*.jar app.jar
EXPOSE 8000
# Size the heap relative to the container memory limit instead of a fixed -Xmx.
ENV JAVA_OPTS="-XX:MaxRAMPercentage=75.0 -XX:+UseG1GC"
# sh -c so $JAVA_OPTS is expanded at container start time.
ENTRYPOINT ["sh", "-c", "java $JAVA_OPTS -jar app.jar"]

View File

@@ -138,20 +138,31 @@ public class VideoSseController {
}
}
// Pass 2: retry missed
// Pass 2: retry missed (up to 3 attempts with smaller batches)
if (!allMissed.isEmpty()) {
emit(emitter, Map.of("type", "retry", "missed", allMissed.size()));
for (int i = 0; i < allMissed.size(); i += 10) {
var batch = allMissed.subList(i, Math.min(i + 10, allMissed.size()));
try {
var result = applyRemapBatch(batch);
updated += result.updated;
} catch (Exception ignored) {}
for (int attempt = 0; attempt < 3 && !allMissed.isEmpty(); attempt++) {
var retryList = new ArrayList<>(allMissed);
allMissed.clear();
for (int i = 0; i < retryList.size(); i += 5) {
var batch = retryList.subList(i, Math.min(i + 5, retryList.size()));
try {
var result = applyRemapBatch(batch);
updated += result.updated;
allMissed.addAll(result.missed);
} catch (Exception e) {
log.warn("Remap cuisine retry failed (attempt {}): {}", attempt + 1, e.getMessage());
allMissed.addAll(batch);
}
}
if (!allMissed.isEmpty()) {
emit(emitter, Map.of("type", "retry", "attempt", attempt + 2, "missed", allMissed.size()));
}
}
}
cache.flush();
emit(emitter, Map.of("type", "complete", "total", total, "updated", updated));
emit(emitter, Map.of("type", "complete", "total", total, "updated", updated, "missed", allMissed.size()));
emitter.complete();
} catch (Exception e) {
emitter.completeWithError(e);
@@ -172,7 +183,9 @@ public class VideoSseController {
var rows = restaurantService.findForRemapFoods();
rows = rows.stream().map(r -> {
var m = JsonUtil.lowerKeys(r);
m.put("foods", JsonUtil.parseStringList(m.get("foods_mentioned")));
// foods_mentioned is now TO_CHAR'd in SQL, parse as string
Object fm = m.get("foods_mentioned");
m.put("foods", JsonUtil.parseStringList(fm));
return m;
}).toList();
@@ -191,23 +204,36 @@ public class VideoSseController {
emit(emitter, Map.of("type", "batch_done", "current", Math.min(i + BATCH, total), "total", total, "updated", updated));
} catch (Exception e) {
allMissed.addAll(batch);
log.warn("Remap foods batch error at {}: {}", i, e.getMessage());
emit(emitter, Map.of("type", "error", "message", e.getMessage(), "current", i));
}
}
// Retry missed (up to 3 attempts with smaller batches)
if (!allMissed.isEmpty()) {
emit(emitter, Map.of("type", "retry", "missed", allMissed.size()));
for (int i = 0; i < allMissed.size(); i += 10) {
var batch = allMissed.subList(i, Math.min(i + 10, allMissed.size()));
try {
var r = applyFoodsBatch(batch);
updated += r.updated;
} catch (Exception ignored) {}
for (int attempt = 0; attempt < 3 && !allMissed.isEmpty(); attempt++) {
var retryList = new ArrayList<>(allMissed);
allMissed.clear();
for (int i = 0; i < retryList.size(); i += 5) {
var batch = retryList.subList(i, Math.min(i + 5, retryList.size()));
try {
var r = applyFoodsBatch(batch);
updated += r.updated;
allMissed.addAll(r.missed);
} catch (Exception e) {
log.warn("Remap foods retry failed (attempt {}): {}", attempt + 1, e.getMessage());
allMissed.addAll(batch);
}
}
if (!allMissed.isEmpty()) {
emit(emitter, Map.of("type", "retry", "attempt", attempt + 2, "missed", allMissed.size()));
}
}
}
cache.flush();
emit(emitter, Map.of("type", "complete", "total", total, "updated", updated));
emit(emitter, Map.of("type", "complete", "total", total, "updated", updated, "missed", allMissed.size()));
emitter.complete();
} catch (Exception e) {
emitter.completeWithError(e);

View File

@@ -15,12 +15,12 @@ public final class CuisineTypes {
"한식|백반/한정식", "한식|국밥/해장국", "한식|찌개/전골/탕", "한식|삼겹살/돼지구이",
"한식|소고기/한우구이", "한식|곱창/막창", "한식|닭/오리구이", "한식|족발/보쌈",
"한식|회/횟집", "한식|해산물", "한식|분식", "한식|면", "한식|죽/죽집",
"한식|순대/순대국", "한식|장어/민물", "한식|주점/포차",
"한식|순대/순대국", "한식|장어/민물", "한식|주점/포차", "한식|파인다이닝/코스",
"일식|스시/오마카세", "일식|라멘", "일식|돈카츠", "일식|텐동/튀김",
"일식|이자카야", "일식|야키니쿠", "일식|카레", "일식|소바/우동",
"중식|중화요리", "중식|마라/훠궈", "중식|딤섬/만두", "중식|양꼬치",
"일식|이자카야", "일식|야키니쿠", "일식|카레", "일식|소바/우동", "일식|파인다이닝/코스",
"중식|중화요리", "중식|마라/훠궈", "중식|딤섬/만두", "중식|양꼬치", "중식|파인다이닝/코스",
"양식|파스타/이탈리안", "양식|스테이크", "양식|햄버거", "양식|피자",
"양식|프렌치", "양식|바베큐", "양식|브런치", "양식|비건/샐러드",
"양식|프렌치", "양식|바베큐", "양식|브런치", "양식|비건/샐러드", "양식|파인다이닝/코스",
"아시아|베트남", "아시아|태국", "아시아|인도/중동", "아시아|동남아기타",
"기타|치킨", "기타|카페/디저트", "기타|베이커리", "기타|뷔페", "기타|퓨전"
);

View File

@@ -208,12 +208,13 @@
</update>
<update id="updateFoodsMentioned">
UPDATE video_restaurants SET foods_mentioned = #{foods} WHERE id = #{id}
UPDATE video_restaurants SET foods_mentioned = #{foods,jdbcType=CLOB} WHERE id = #{id}
</update>
<select id="findForRemapCuisine" resultType="map">
SELECT r.id, r.name, r.cuisine_type,
(SELECT LISTAGG(vr.foods_mentioned, '|') WITHIN GROUP (ORDER BY vr.id)
(SELECT LISTAGG(TO_CHAR(DBMS_LOB.SUBSTR(vr.foods_mentioned, 500, 1)), '|')
WITHIN GROUP (ORDER BY vr.id)
FROM video_restaurants vr WHERE vr.restaurant_id = r.id) AS foods
FROM restaurants r
WHERE EXISTS (SELECT 1 FROM video_restaurants vr2 WHERE vr2.restaurant_id = r.id)
@@ -221,7 +222,9 @@
</select>
<select id="findForRemapFoods" resultType="map">
SELECT vr.id, r.name, r.cuisine_type, vr.foods_mentioned, v.title
SELECT vr.id, r.name, r.cuisine_type,
TO_CHAR(vr.foods_mentioned) AS foods_mentioned,
v.title
FROM video_restaurants vr
JOIN restaurants r ON r.id = vr.restaurant_id
JOIN videos v ON v.id = vr.video_id

129
deploy.sh Executable file
View File

@@ -0,0 +1,129 @@
#!/bin/bash
# deploy.sh — build & push Tasteby images, roll out to OKE, and tag the release.
#
# Usage:
#   ./deploy.sh [message]                  # deploy backend + frontend
#   ./deploy.sh --backend-only "message"   # backend image only
#   ./deploy.sh --frontend-only "message"  # frontend image only
#   ./deploy.sh --dry-run "message"        # print the plan, change nothing
#   ./deploy.sh -m "message"               # explicit message flag
set -euo pipefail

# ── Configuration ──
REGISTRY="icn.ocir.io/idyhsdamac8c/tasteby"
NAMESPACE="tasteby"
PLATFORM="linux/arm64"   # OKE nodes are ARM

# ── Parse arguments ──
TARGET="all"   # all | backend | frontend
MESSAGE=""
DRY_RUN=false
while [[ $# -gt 0 ]]; do
  case $1 in
    --backend-only)  TARGET="backend";  shift ;;
    --frontend-only) TARGET="frontend"; shift ;;
    --dry-run)       DRY_RUN=true;      shift ;;
    -m)              MESSAGE="$2";      shift 2 ;;
    *)               MESSAGE="$1";      shift ;;
  esac
done

# ── Determine next version (vMAJOR.MINOR.PATCH, patch auto-increments) ──
# BUGFIX: the previous `git tag | grep '^v' | head -1 ... || echo "v0.1.0"`
# was unreliable under `set -o pipefail`: once `head` has its line it can
# close the pipe and SIGPIPE the earlier stages, failing the pipeline and
# appending the default AFTER a real tag (making LATEST_TAG two lines).
# Filter inside git itself and apply the default explicitly instead.
LATEST_TAG=$(git tag --list 'v*' --sort=-v:refname | head -n1 || true)
LATEST_TAG="${LATEST_TAG:-v0.1.0}"
MAJOR=$(echo "$LATEST_TAG" | cut -d. -f1)   # keeps the leading "v"
MINOR=$(echo "$LATEST_TAG" | cut -d. -f2)
PATCH=$(echo "$LATEST_TAG" | cut -d. -f3)
NEXT_PATCH=$((PATCH + 1))
TAG="${MAJOR}.${MINOR}.${NEXT_PATCH}"

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " Deploying Tasteby ${TAG}"
echo " Target: ${TARGET}"
echo " Message: ${MESSAGE:-<none>}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

if $DRY_RUN; then
  echo "[DRY RUN] Would build & push images, apply K8s manifests, create git tag."
  exit 0
fi

# Run from the repo root so the relative build contexts below are stable.
cd "$(git rev-parse --show-toplevel)"

# ── Build & Push ──
if [[ "$TARGET" == "all" || "$TARGET" == "backend" ]]; then
  echo ""
  echo "▶ Building backend image..."
  docker build --platform "$PLATFORM" \
    -t "$REGISTRY/backend:$TAG" \
    -t "$REGISTRY/backend:latest" \
    backend-java/
  echo "▶ Pushing backend image..."
  docker push "$REGISTRY/backend:$TAG"
  docker push "$REGISTRY/backend:latest"
fi

if [[ "$TARGET" == "all" || "$TARGET" == "frontend" ]]; then
  echo ""
  echo "▶ Building frontend image..."
  # NEXT_PUBLIC_* values are baked into the client bundle at build time, so
  # they must be passed as build args. Environment variables take priority;
  # frontend/.env.local is the fallback.
  MAPS_KEY="${NEXT_PUBLIC_GOOGLE_MAPS_API_KEY:-}"
  CLIENT_ID="${NEXT_PUBLIC_GOOGLE_CLIENT_ID:-}"
  if [[ -f frontend/.env.local ]]; then
    # BUGFIX: use `-f2-` (not `-f2`) so values containing '=' are kept whole.
    MAPS_KEY="${MAPS_KEY:-$(grep NEXT_PUBLIC_GOOGLE_MAPS_API_KEY frontend/.env.local 2>/dev/null | cut -d= -f2-)}"
    CLIENT_ID="${CLIENT_ID:-$(grep NEXT_PUBLIC_GOOGLE_CLIENT_ID frontend/.env.local 2>/dev/null | cut -d= -f2-)}"
  fi
  docker build --platform "$PLATFORM" \
    --build-arg NEXT_PUBLIC_GOOGLE_MAPS_API_KEY="$MAPS_KEY" \
    --build-arg NEXT_PUBLIC_GOOGLE_CLIENT_ID="$CLIENT_ID" \
    -t "$REGISTRY/frontend:$TAG" \
    -t "$REGISTRY/frontend:latest" \
    frontend/
  echo "▶ Pushing frontend image..."
  docker push "$REGISTRY/frontend:$TAG"
  docker push "$REGISTRY/frontend:latest"
fi

# ── Deploy to K8s ──
echo ""
echo "▶ Updating K8s deployments..."
if [[ "$TARGET" == "all" || "$TARGET" == "backend" ]]; then
  kubectl set image deployment/backend \
    backend="$REGISTRY/backend:$TAG" \
    -n "$NAMESPACE"
  echo " Waiting for backend rollout..."
  kubectl rollout status deployment/backend -n "$NAMESPACE" --timeout=180s
fi
if [[ "$TARGET" == "all" || "$TARGET" == "frontend" ]]; then
  kubectl set image deployment/frontend \
    frontend="$REGISTRY/frontend:$TAG" \
    -n "$NAMESPACE"
  echo " Waiting for frontend rollout..."
  kubectl rollout status deployment/frontend -n "$NAMESPACE" --timeout=120s
fi

# ── Git tag ──
echo ""
echo "▶ Creating git tag ${TAG}..."
TAG_MESSAGE="Deploy ${TAG}"
if [[ -n "$MESSAGE" ]]; then
  TAG_MESSAGE="${TAG_MESSAGE}: ${MESSAGE}"
fi
# Record which components and images this deploy shipped.
# (Built with explicit ifs instead of `[ ] || [ ] && echo` chains, whose
# mixed &&/|| precedence is easy to misread.)
COMPONENTS=""
IMAGES=""
if [[ "$TARGET" == "all" || "$TARGET" == "backend" ]]; then
  COMPONENTS="backend"
  IMAGES=" - ${REGISTRY}/backend:${TAG}"
fi
if [[ "$TARGET" == "all" || "$TARGET" == "frontend" ]]; then
  COMPONENTS="${COMPONENTS:+$COMPONENTS, }frontend"
  IMAGES="${IMAGES:+$IMAGES
} - ${REGISTRY}/frontend:${TAG}"
fi
TAG_MESSAGE="${TAG_MESSAGE}

Components: ${COMPONENTS}
Images:
${IMAGES}"
git tag -a "$TAG" -m "$TAG_MESSAGE"
git push origin "$TAG"

echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " ✅ Deploy complete: ${TAG}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
kubectl get pods -n "$NAMESPACE"

View File

@@ -0,0 +1,343 @@
# Tasteby OKE 배포 가이드
## 아키텍처
```
Internet
   │
   ▼
OCI Load Balancer (Nginx Ingress가 자동 생성)
   ├─ /     → frontend Service (Next.js :3001)
   └─ /api/ → backend Service (Spring Boot :8000)

클러스터 내부 구성요소:
   ├─ cert-manager (Let's Encrypt 인증서 자동 발급/갱신)
   └─ Redis (in-cluster 캐시 :6379)
```
## 인프라 정보
| 항목 | 값 |
|------|-----|
| 클러스터 | tasteby-cluster |
| 리전 | ap-seoul-1 (Seoul) |
| 노드 | ARM64 × 2대 (2 CPU / 8GB 각) |
| K8s 버전 | v1.34.2 |
| OCI 프로필 | JOUNGMINKOAWS |
| OCIR | icn.ocir.io/idyhsdamac8c/tasteby |
| 도메인 | www.tasteby.net (Namecheap DNS) |
| SSL | Let's Encrypt (cert-manager + HTTP-01) |
## 파일 구조
```
tasteby/
├── backend-java/Dockerfile
├── frontend/Dockerfile
├── k8s/
│ ├── namespace.yaml
│ ├── configmap.yaml
│ ├── secrets.yaml.template ← 실제 secrets.yaml은 .gitignore
│ ├── redis-deployment.yaml
│ ├── backend-deployment.yaml
│ ├── frontend-deployment.yaml
│ ├── ingress.yaml
│ └── cert-manager/
│ └── cluster-issuer.yaml
└── deploy.sh
```
## 리소스 배분
| 파드 | replicas | CPU req/lim | 메모리 req/lim |
|------|----------|-------------|----------------|
| backend (Java) | 1 | 500m / 1 | 768Mi / 1536Mi |
| frontend (Next.js) | 1 | 200m / 500m | 256Mi / 512Mi |
| redis | 1 | 100m / 200m | 128Mi / 256Mi |
| ingress-controller | 1 | 100m / 200m | 128Mi / 256Mi |
| cert-manager (×3) | 1씩 | 50m / 100m | 64Mi / 128Mi |
| **합계** | | **~1.2 CPU** | **~1.6GB** |
전체 클러스터: 4 CPU / 16GB → 여유 충분
---
## 1단계: 사전 준비
### 1.1 kubectl 설정
```bash
# OKE kubeconfig 가져오기
oci ce cluster create-kubeconfig \
--cluster-id ocid1.cluster.oc1.ap-seoul-1.aaaaaaaaoqgd2sh6754m5zrwfqaxwrtlqon3dxtdwbbc2dvzbcbou3pf75rq \
--profile JOUNGMINKOAWS \
--region ap-seoul-1 \
--token-version 2.0.0 \
--kube-endpoint PUBLIC_ENDPOINT
# ~/.kube/config의 user args에 --profile JOUNGMINKOAWS 추가 필요
# 확인
kubectl get nodes
```
### 1.2 Helm 설치 (없으면)
```bash
brew install helm
```
---
## 2단계: 인프라 설치 (1회성)
### 2.1 Nginx Ingress Controller
```bash
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm install ingress-nginx ingress-nginx/ingress-nginx \
--namespace ingress-nginx --create-namespace \
--set controller.service.type=LoadBalancer \
--set controller.service.annotations."oci\.oraclecloud\.com/load-balancer-type"=nlb
```
설치 후 External IP 확인:
```bash
kubectl get svc -n ingress-nginx ingress-nginx-controller -w
# EXTERNAL-IP가 나오면 Namecheap에서 A 레코드 업데이트
# www.tasteby.net → <EXTERNAL-IP>
# tasteby.net → <EXTERNAL-IP>
```
### 2.2 cert-manager
```bash
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager --create-namespace \
--set crds.enabled=true
```
### 2.3 ClusterIssuer 생성
```bash
kubectl apply -f k8s/cert-manager/cluster-issuer.yaml
```
---
## 3단계: 네임스페이스 및 시크릿 생성 (1회성)
### 3.1 네임스페이스
```bash
kubectl apply -f k8s/namespace.yaml
```
### 3.2 OCIR 이미지 Pull Secret
```bash
# OCI Auth Token 필요 (콘솔 > User Settings > Auth Tokens에서 생성)
kubectl create secret docker-registry ocir-secret \
--docker-server=icn.ocir.io \
--docker-username="idyhsdamac8c/<oci-username>" \
--docker-password="<auth-token>" \
--docker-email="<email>" \
-n tasteby
```
### 3.3 앱 시크릿
```bash
# secrets.yaml.template을 복사하여 실제 값 입력
cp k8s/secrets.yaml.template k8s/secrets.yaml
# 편집 후
kubectl apply -f k8s/secrets.yaml
```
### 3.4 Oracle Wallet Secret
```bash
# Wallet 디렉토리의 파일들을 Secret으로 생성
kubectl create secret generic oracle-wallet \
--from-file=cwallet.sso=<wallet-dir>/cwallet.sso \
--from-file=tnsnames.ora=<wallet-dir>/tnsnames.ora \
--from-file=sqlnet.ora=<wallet-dir>/sqlnet.ora \
--from-file=keystore.jks=<wallet-dir>/keystore.jks \
--from-file=truststore.jks=<wallet-dir>/truststore.jks \
--from-file=ojdbc.properties=<wallet-dir>/ojdbc.properties \
-n tasteby
```
### 3.5 OCI Config Secret
```bash
# OCI API key config 파일과 PEM 키를 Secret으로 생성
# config 파일은 K8s용으로 수정 필요 (key_file 경로를 /root/.oci/oci_api_key.pem으로)
kubectl create secret generic oci-config \
--from-file=config=<oci-config-for-k8s> \
--from-file=oci_api_key.pem=<pem-key-file> \
-n tasteby
```
**참고**: OCI config 파일에서 `key_file` 경로를 컨테이너 내부 마운트 경로로 수정:
```ini
[DEFAULT]
user=ocid1.user.oc1..xxx
fingerprint=xx:xx:xx
key_file=/root/.oci/oci_api_key.pem
tenancy=ocid1.tenancy.oc1..xxx
region=ap-seoul-1
```
### 3.6 ConfigMap 적용
```bash
kubectl apply -f k8s/configmap.yaml
```
---
## 4단계: 앱 배포
### 4.1 기본 리소스 배포
```bash
kubectl apply -f k8s/redis-deployment.yaml
kubectl apply -f k8s/backend-deployment.yaml
kubectl apply -f k8s/frontend-deployment.yaml
kubectl apply -f k8s/ingress.yaml
```
### 4.2 OCIR 로그인 (이미지 푸시용)
```bash
docker login icn.ocir.io \
-u "idyhsdamac8c/<oci-username>" \
-p "<auth-token>"
```
### 4.3 이미지 빌드 & 배포
```bash
# 전체 배포
./deploy.sh "초기 배포"
# 백엔드만 배포
./deploy.sh --backend-only "API 버그 수정"
# 프론트엔드만 배포
./deploy.sh --frontend-only "UI 개선"
# 드라이런 (실제 실행 안 함)
./deploy.sh --dry-run "테스트"
```
---
## 5단계: DNS 설정
Namecheap에서 A 레코드 변경:
| Type | Host | Value | TTL |
|------|------|-------|-----|
| A | @ | `<LB External IP>` | Automatic |
| A | www | `<LB External IP>` | Automatic |
DNS 전파 후 cert-manager가 자동으로 Let's Encrypt 인증서를 발급합니다.
---
## 운영 명령어
### 상태 확인
```bash
# 파드 상태
kubectl get pods -n tasteby
# 로그 확인
kubectl logs -f deployment/backend -n tasteby
kubectl logs -f deployment/frontend -n tasteby
# 인증서 상태
kubectl get certificate -n tasteby
kubectl describe certificate tasteby-tls -n tasteby
```
### 롤백
```bash
# 이전 버전으로 롤백
kubectl rollout undo deployment/backend -n tasteby
kubectl rollout undo deployment/frontend -n tasteby
# 특정 리비전으로 롤백
kubectl rollout history deployment/backend -n tasteby
kubectl rollout undo deployment/backend --to-revision=2 -n tasteby
```
### 시크릿 업데이트
```bash
# secrets.yaml 수정 후
kubectl apply -f k8s/secrets.yaml
# 파드 재시작 (시크릿 변경 반영)
kubectl rollout restart deployment/backend -n tasteby
```
### 스케일링
```bash
# 백엔드 2개로 확장
kubectl scale deployment/backend --replicas=2 -n tasteby
```
---
## 배포 태그 규칙
- 형식: `v0.1.X` (patch 버전 자동 증가)
- `deploy.sh`가 빌드 → 푸시 → K8s 업데이트 → git tag 생성 → 태그 푸시까지 자동 처리
- 태그 메시지에 배포 대상(backend/frontend)과 이미지 태그 포함
```bash
# 태그 목록 확인
git tag -l 'v*' --sort=-v:refname
# 특정 태그의 배포 내역 확인
git tag -n20 v0.1.5
```
---
## 트러블슈팅
### 이미지 Pull 실패
```bash
kubectl describe pod <pod-name> -n tasteby
# Events에서 ImagePullBackOff 확인 → ocir-secret 점검
```
### DB 연결 실패
```bash
kubectl exec -it deployment/backend -n tasteby -- env | grep ORACLE
# Oracle Wallet 마운트 확인
kubectl exec -it deployment/backend -n tasteby -- ls /etc/oracle/wallet/
```
### 인증서 발급 안 됨
```bash
kubectl get challenges -n tasteby
kubectl describe challenge -n tasteby
# DNS A 레코드가 LB IP로 설정되었는지, 80 포트가 열려있는지 확인
```
### OCI GenAI 연결 실패
```bash
kubectl exec -it deployment/backend -n tasteby -- cat /root/.oci/config
# key_file 경로가 /root/.oci/oci_api_key.pem 인지 확인
```

19
frontend/Dockerfile Normal file
View File

@@ -0,0 +1,19 @@
# ── Build stage ──
FROM node:22-alpine AS build
WORKDIR /app
# Install dependencies first so the npm ci layer caches independently of
# source changes.
COPY package.json package-lock.json ./
RUN npm ci
COPY . .
# NEXT_PUBLIC_* values are inlined into the client bundle at build time,
# so they must arrive as build args rather than runtime env vars.
ARG NEXT_PUBLIC_GOOGLE_MAPS_API_KEY
ARG NEXT_PUBLIC_GOOGLE_CLIENT_ID
RUN npm run build
# ── Runtime stage ──
# Relies on `output: "standalone"` in next.config — .next/standalone
# contains server.js plus a pruned node_modules tree.
FROM node:22-alpine
WORKDIR /app
COPY --from=build /app/.next/standalone ./
COPY --from=build /app/.next/static ./.next/static
COPY --from=build /app/public ./public
EXPOSE 3001
# HOSTNAME=0.0.0.0 so the server binds all interfaces inside the pod.
ENV PORT=3001 HOSTNAME=0.0.0.0
CMD ["node", "server.js"]

View File

@@ -1,7 +1,7 @@
import type { NextConfig } from "next";
const nextConfig: NextConfig = {
/* config options here */
output: "standalone",
};
export default nextConfig;

View File

@@ -13,20 +13,20 @@ import MyReviewsList from "@/components/MyReviewsList";
import BottomSheet from "@/components/BottomSheet";
import { getCuisineIcon } from "@/lib/cuisine-icons";
const CUISINE_GROUPS: { label: string; prefix: string }[] = [
{ label: "한식", prefix: "한식" },
{ label: "일식", prefix: "일식" },
{ label: "중식", prefix: "중식" },
{ label: "양식", prefix: "양식" },
{ label: "아시아", prefix: "아시아" },
{ label: "기타", prefix: "기타" },
const CUISINE_TAXONOMY: { category: string; items: string[] }[] = [
{ category: "한식", items: ["백반/한정식", "국밥/해장국", "찌개/전골/탕", "삼겹살/돼지구이", "소고기/한우구이", "곱창/막창", "닭/오리구이", "족발/보쌈", "회/횟집", "해산물", "분식", "면", "죽/죽집", "순대/순대국", "장어/민물", "주점/포차", "파인다이닝/코스"] },
{ category: "일식", items: ["스시/오마카세", "라멘", "돈카츠", "텐동/튀김", "이자카야", "야키니쿠", "카레", "소바/우동", "파인다이닝/코스"] },
{ category: "중식", items: ["중화요리", "마라/훠궈", "딤섬/만두", "양꼬치", "파인다이닝/코스"] },
{ category: "양식", items: ["파스타/이탈리안", "스테이크", "햄버거", "피자", "프렌치", "바베큐", "브런치", "비건/샐러드", "파인다이닝/코스"] },
{ category: "아시아", items: ["베트남", "태국", "인도/중동", "동남아기타"] },
{ category: "기타", items: ["치킨", "카페/디저트", "베이커리", "뷔페", "퓨전"] },
];
function matchCuisineGroup(cuisineType: string | null, group: string): boolean {
if (!cuisineType) return false;
const g = CUISINE_GROUPS.find((g) => g.label === group);
if (!g) return false;
return cuisineType.startsWith(g.prefix);
function matchCuisineFilter(cuisineType: string | null, filter: string): boolean {
if (!cuisineType || !filter) return false;
// filter can be a category ("한식") or full type ("한식|백반/한정식")
if (filter.includes("|")) return cuisineType === filter;
return cuisineType.startsWith(filter);
}
const PRICE_GROUPS: { label: string; test: (p: string) => boolean }[] = [
@@ -171,7 +171,7 @@ export default function Home() {
const filteredRestaurants = useMemo(() => {
return restaurants.filter((r) => {
if (channelFilter && !(r.channels || []).includes(channelFilter)) return false;
if (cuisineFilter && !matchCuisineGroup(r.cuisine_type, cuisineFilter)) return false;
if (cuisineFilter && !matchCuisineFilter(r.cuisine_type, cuisineFilter)) return false;
if (priceFilter && !matchPriceGroup(r.price_range, priceFilter)) return false;
if (countryFilter) {
const parsed = parseRegion(r.region);
@@ -458,8 +458,15 @@ export default function Home() {
className="border dark:border-gray-700 rounded-lg px-3 py-1.5 text-sm text-gray-600 dark:text-gray-300 dark:bg-gray-800"
>
<option value="">🍽 </option>
{CUISINE_GROUPS.map((g) => (
<option key={g.label} value={g.label}>🍽 {g.label}</option>
{CUISINE_TAXONOMY.map((g) => (
<optgroup key={g.category} label={`── ${g.category} ──`}>
<option value={g.category}>🍽 {g.category} </option>
{g.items.map((item) => (
<option key={`${g.category}|${item}`} value={`${g.category}|${item}`}>
&nbsp;&nbsp;{item}
</option>
))}
</optgroup>
))}
</select>
<select
@@ -684,8 +691,15 @@ export default function Home() {
className="border dark:border-gray-700 rounded-lg px-2.5 py-1.5 text-xs text-gray-600 dark:text-gray-300 bg-white dark:bg-gray-800"
>
<option value="">🍽 </option>
{CUISINE_GROUPS.map((g) => (
<option key={g.label} value={g.label}>🍽 {g.label}</option>
{CUISINE_TAXONOMY.map((g) => (
<optgroup key={g.category} label={`── ${g.category} ──`}>
<option value={g.category}>🍽 {g.category} </option>
{g.items.map((item) => (
<option key={`${g.category}|${item}`} value={`${g.category}|${item}`}>
&nbsp;&nbsp;{item}
</option>
))}
</optgroup>
))}
</select>
<select

View File

@@ -0,0 +1,70 @@
# Backend (Spring Boot) Deployment + ClusterIP Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: backend
  namespace: tasteby
spec:
  replicas: 1
  selector:
    matchLabels:
      app: backend
  template:
    metadata:
      labels:
        app: backend
    spec:
      imagePullSecrets:
        - name: ocir-secret   # OCIR is a private registry
      containers:
        - name: backend
          image: icn.ocir.io/idyhsdamac8c/tasteby/backend:latest
          ports:
            - containerPort: 8000
          # Config and secrets are injected wholesale as env vars.
          envFrom:
            - configMapRef:
                name: tasteby-config
            - secretRef:
                name: tasteby-secrets
          volumeMounts:
            - name: oracle-wallet
              mountPath: /etc/oracle/wallet   # matches ORACLE_WALLET in the ConfigMap
              readOnly: true
            - name: oci-config
              mountPath: /root/.oci           # OCI SDK default config location
              readOnly: true
          resources:
            requests:
              cpu: 500m
              memory: 768Mi
            limits:
              cpu: "1"
              memory: 1536Mi
          # TCP-only probes; generous initial delays cover JVM/Spring startup.
          # NOTE(review): consider an httpGet probe if a health endpoint exists.
          readinessProbe:
            tcpSocket:
              port: 8000
            initialDelaySeconds: 30
            periodSeconds: 10
          livenessProbe:
            tcpSocket:
              port: 8000
            initialDelaySeconds: 60
            periodSeconds: 30
      volumes:
        - name: oracle-wallet
          secret:
            secretName: oracle-wallet
        - name: oci-config
          secret:
            secretName: oci-config
---
apiVersion: v1
kind: Service
metadata:
  name: backend
  namespace: tasteby
spec:
  selector:
    app: backend
  ports:
    - port: 8000
      targetPort: 8000

View File

@@ -0,0 +1,14 @@
# Cluster-wide Let's Encrypt (production) issuer used by the tasteby ingress.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: joungmin@tasteby.net   # receives expiry / problem notifications
    privateKeySecretRef:
      name: letsencrypt-prod-key  # where the ACME account key is stored
    solvers:
      # HTTP-01 challenges are served through the nginx ingress.
      # NOTE(review): `class` is the legacy solver field; recent cert-manager
      # versions prefer `ingressClassName` — confirm against the installed version.
      - http01:
          ingress:
            class: nginx

14
k8s/configmap.yaml Normal file
View File

@@ -0,0 +1,14 @@
# Non-secret runtime configuration, injected into the backend via envFrom.
# Secrets (passwords, API keys) live in tasteby-secrets instead.
apiVersion: v1
kind: ConfigMap
metadata:
  name: tasteby-config
  namespace: tasteby
data:
  # In-cluster Redis service (see redis-deployment.yaml)
  REDIS_HOST: "redis"
  REDIS_PORT: "6379"
  REDIS_DB: "0"
  # Mount path of the oracle-wallet Secret volume in the backend pod
  ORACLE_WALLET: "/etc/oracle/wallet"
  # OCI Generative AI endpoints — note chat and embedding point at
  # different regions (ashburn vs chicago)
  OCI_CHAT_ENDPOINT: "https://inference.generativeai.us-ashburn-1.oci.oraclecloud.com"
  OCI_GENAI_ENDPOINT: "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com"
  OCI_EMBED_MODEL_ID: "cohere.embed-v4.0"
  # OAuth client ID is public by design; the client secret stays in tasteby-secrets
  GOOGLE_CLIENT_ID: "635551099330-2l003d3ernjmkqavd4f6s78r8r405iml.apps.googleusercontent.com"

View File

@@ -0,0 +1,53 @@
# Frontend (Next.js standalone) Deployment + ClusterIP Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
  namespace: tasteby
spec:
  replicas: 1
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      imagePullSecrets:
        - name: ocir-secret   # OCIR is a private registry
      containers:
        - name: frontend
          image: icn.ocir.io/idyhsdamac8c/tasteby/frontend:latest
          ports:
            - containerPort: 3001
          resources:
            requests:
              cpu: 200m
              memory: 256Mi
            limits:
              cpu: 500m
              memory: 512Mi
          # Probes hit the root page — no dedicated health endpoint is
          # configured here.
          readinessProbe:
            httpGet:
              path: /
              port: 3001
            initialDelaySeconds: 10
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /
              port: 3001
            initialDelaySeconds: 15
            periodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
  name: frontend
  namespace: tasteby
spec:
  selector:
    app: frontend
  ports:
    - port: 3001
      targetPort: 3001

48
k8s/ingress.yaml Normal file
View File

@@ -0,0 +1,48 @@
# Public ingress: /api/ routes to the Spring Boot backend, everything else
# to the Next.js frontend. TLS issued by cert-manager (letsencrypt-prod).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: tasteby-ingress
  namespace: tasteby
  annotations:
    # cert-manager watches this and provisions the tasteby-tls certificate.
    cert-manager.io/cluster-issuer: letsencrypt-prod
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    # Long proxy timeouts — the backend serves long-lived (SSE-style) responses.
    nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-body-size: "10m"
    # Redirect tasteby.net → www.tasteby.net
    # NOTE(review): with this redirect active the explicit tasteby.net rule
    # below may never serve traffic — confirm whether both are needed.
    nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - www.tasteby.net
        - tasteby.net   # both names on the one certificate
      secretName: tasteby-tls
  rules:
    - host: www.tasteby.net
      http:
        paths:
          - path: /api/
            pathType: Prefix
            backend:
              service:
                name: backend
                port:
                  number: 8000
          - path: /
            pathType: Prefix
            backend:
              service:
                name: frontend
                port:
                  number: 3001
    - host: tasteby.net
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: frontend
                port:
                  number: 3001

4
k8s/namespace.yaml Normal file
View File

@@ -0,0 +1,4 @@
# Application namespace — all Tasteby resources live here.
apiVersion: v1
kind: Namespace
metadata:
  name: tasteby

39
k8s/redis-deployment.yaml Normal file
View File

@@ -0,0 +1,39 @@
# In-cluster Redis Deployment + ClusterIP Service.
# No PersistentVolume is configured, so data does not survive pod restarts —
# presumably acceptable because Redis is used as a cache here; confirm.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: tasteby
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - name: redis
          image: redis:7-alpine
          ports:
            - containerPort: 6379
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 200m
              memory: 256Mi
---
# Service name "redis" is what REDIS_HOST in the ConfigMap resolves to.
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: tasteby
spec:
  selector:
    app: redis
  ports:
    - port: 6379
      targetPort: 6379

17
k8s/secrets.yaml.template Normal file
View File

@@ -0,0 +1,17 @@
# Copy this to secrets.yaml and fill in real values.
# DO NOT commit secrets.yaml to git! (k8s/secrets.yaml is listed in .gitignore)
apiVersion: v1
kind: Secret
metadata:
  name: tasteby-secrets
  namespace: tasteby
type: Opaque
# stringData accepts plain-text values; Kubernetes base64-encodes them on apply.
stringData:
  ORACLE_USER: "<oracle-username>"
  ORACLE_PASSWORD: "<oracle-password>"
  # TNS alias plus wallet location, e.g. mydb_high?TNS_ADMIN=/etc/oracle/wallet
  ORACLE_DSN: "<tns-alias>_high?TNS_ADMIN=/etc/oracle/wallet"
  JWT_SECRET: "<jwt-secret>"
  OCI_COMPARTMENT_ID: "<oci-compartment-id>"
  OCI_CHAT_MODEL_ID: "<oci-chat-model-id>"
  GOOGLE_MAPS_API_KEY: "<google-maps-api-key>"
  YOUTUBE_DATA_API_KEY: "<youtube-data-api-key>"