diff --git a/custom-pg_hba.conf b/custom-pg_hba.conf index cc40507..3fe8084 100644 --- a/custom-pg_hba.conf +++ b/custom-pg_hba.conf @@ -1,31 +1,104 @@ # PostgreSQL Client Authentication Configuration File # =================================================== # -# TYPE DATABASE USER ADDRESS METHOD +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# plain TCP/IP socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. 
+# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". +# Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + + + +# DO NOT DISABLE! +# If you change this first entry you will need to make sure that the +# database superuser can access the database using some other method. 
+# Noninteractive access to all databases is required during automatic +# maintenance (custom daily cronjobs, replication, and similar tasks). +# # Database administrative login by Unix domain socket local all postgres peer +# TYPE DATABASE USER ADDRESS METHOD + # "local" is for Unix domain socket connections only # md5認証に変更してpeer認証エラーを回避 local all all md5 - # IPv4 local connections: host all all 127.0.0.1/32 md5 - # IPv6 local connections: host all all ::1/128 md5 - # Allow replication connections from localhost, by a user with the # replication privilege. local replication all md5 host replication all 127.0.0.1/32 md5 host replication all ::1/128 md5 - -# Docker network connections host all all 172.0.0.0/8 md5 -host all all 192.168.0.0/16 md5 -host all all 0.0.0.0/0 md5 - -# Replication connections -host replication replicator 0.0.0.0/0 md5 +host all all 172.16.0.0/12 md5 +host all all 192.168.0.0/16 md5 +host all all 0.0.0.0/0 md5 +host replication replicator 0.0.0.0/0 md5 diff --git a/requirements.txt b/requirements.txt index 85602a7..ab392ca 100644 --- a/requirements.txt +++ b/requirements.txt @@ -82,3 +82,4 @@ haversine piexif==1.1.3 Pillow>=8.0.0 +boto3 diff --git a/rog/utils/__init__.py b/rog/utils/__init__.py new file mode 100644 index 0000000..5f24585 --- /dev/null +++ b/rog/utils/__init__.py @@ -0,0 +1 @@ +# Python package marker diff --git a/rog/utils/s3_image_uploader.py b/rog/utils/s3_image_uploader.py new file mode 100644 index 0000000..b28613f --- /dev/null +++ b/rog/utils/s3_image_uploader.py @@ -0,0 +1,153 @@ +""" +S3画像アップロードユーティリティ +チェックイン時の画像をS3にアップロードし、URLを生成します +""" + +import boto3 +import logging +import uuid +from datetime import datetime +from django.conf import settings +from botocore.exceptions import ClientError, NoCredentialsError +import base64 +import io +import requests + +logger = logging.getLogger(__name__) + +class S3ImageUploader: + def __init__(self): + """S3クライアントを初期化""" + try: + self.s3_client = boto3.client( + 's3', + 
aws_access_key_id=getattr(settings, 'AWS_ACCESS_KEY', None), + aws_secret_access_key=getattr(settings, 'AWS_SECRET_ACCESS_KEY', None), + region_name=getattr(settings, 'AWS_REGION', 'us-west-2') + ) + self.bucket_name = getattr(settings, 'S3_BUCKET_NAME', 'sumasenrogaining') + logger.info(f"S3 client initialized for bucket: {self.bucket_name}") + except Exception as e: + logger.error(f"Failed to initialize S3 client: {e}") + self.s3_client = None + self.bucket_name = None + + def upload_checkin_image(self, image_data, event_code, zekken_number, cp_number): + """ + チェックイン画像をS3にアップロード + + Args: + image_data: 画像データ(URLまたはBase64) + event_code: イベントコード + zekken_number: ゼッケン番号 + cp_number: チェックポイント番号 + + Returns: + str: S3のURL、失敗時は元のimage_data + """ + if not self.s3_client or not image_data: + logger.warning("S3 client not available or no image data, returning original") + return image_data + + try: + # 画像データを取得 + image_binary = self._get_image_binary(image_data) + if not image_binary: + logger.error("Failed to get image binary data, returning original") + return image_data + + # S3キーを生成: {event_code}/{zekken_number}/{cp_number}.jpg + s3_key = f"{event_code}/{zekken_number}/{cp_number}.jpg" + + # S3にアップロード + self.s3_client.put_object( + Bucket=self.bucket_name, + Key=s3_key, + Body=image_binary, + ContentType='image/jpeg', + ACL='public-read' # 公開読み取り可能 + ) + + # S3 URLを生成 + aws_region = getattr(settings, 'AWS_REGION', 'us-west-2') + s3_url = f"https://{self.bucket_name}.s3.{aws_region}.amazonaws.com/{s3_key}" + + logger.info(f"Successfully uploaded image to S3: {s3_url}") + return s3_url + + except ClientError as e: + logger.error(f"S3 upload failed: {e}, returning original URL") + return image_data + except Exception as e: + logger.error(f"Unexpected error during S3 upload: {e}, returning original URL") + return image_data + + def _get_image_binary(self, image_data): + """ + 画像データからバイナリデータを取得 + + Args: + image_data: 画像URL(HTTP)またはBase64エンコードされた画像データ + + Returns: + 
bytes: 画像のバイナリデータ + """ + try: + if isinstance(image_data, str): + # HTTPURLの場合 + if image_data.startswith('http'): + return self._download_image_from_url(image_data) + # Base64の場合 + elif self._is_base64(image_data): + return base64.b64decode(image_data) + # data:image/jpeg;base64,の形式の場合 + elif image_data.startswith('data:image'): + base64_data = image_data.split(',')[1] + return base64.b64decode(base64_data) + + logger.error(f"Unsupported image data format: {type(image_data)}") + return None + + except Exception as e: + logger.error(f"Error processing image data: {e}") + return None + + def _download_image_from_url(self, url): + """URLから画像をダウンロード""" + try: + response = requests.get(url, timeout=30) + response.raise_for_status() + return response.content + except Exception as e: + logger.error(f"Failed to download image from URL {url}: {e}") + return None + + def _is_base64(self, data): + """文字列がBase64かどうかをチェック""" + try: + if isinstance(data, str): + base64.b64decode(data, validate=True) + return True + except Exception: + return False + return False + + def generate_s3_url(self, event_code, zekken_number, cp_number): + """ + S3 URLを生成(アップロード済みの画像用) + + Args: + event_code: イベントコード + zekken_number: ゼッケン番号 + cp_number: チェックポイント番号 + + Returns: + str: S3のURL + """ + aws_region = getattr(settings, 'AWS_REGION', 'us-west-2') + s3_key = f"{event_code}/{zekken_number}/{cp_number}.jpg" + return f"https://{self.bucket_name}.s3.{aws_region}.amazonaws.com/{s3_key}" + + +# グローバルインスタンス +s3_uploader = S3ImageUploader() diff --git a/rog/views_apis/api_play.py b/rog/views_apis/api_play.py index 8f252de..f278889 100755 --- a/rog/views_apis/api_play.py +++ b/rog/views_apis/api_play.py @@ -15,6 +15,7 @@ from datetime import datetime import uuid import time from django.http import JsonResponse +from rog.utils.s3_image_uploader import s3_uploader logger = logging.getLogger(__name__) @@ -570,19 +571,34 @@ def checkin_from_rogapp(request): # トランザクション開始 with transaction.atomic(): + # 
S3に画像をアップロードし、S3 URLを取得 + s3_image_url = image_url + if image_url: + try: + s3_image_url = s3_uploader.upload_checkin_image( + image_data=image_url, + event_code=entry.event.event_name, + zekken_number=entry.zekken_number, + cp_number=cp_number + ) + logger.info(f"[CHECKIN] S3 upload - Original: {image_url[:50]}..., S3: {s3_image_url}") + except Exception as e: + logger.error(f"[CHECKIN] S3 upload failed, using original URL: {e}") + s3_image_url = image_url + # serial_numberを自動生成(既存の最大値+1) max_serial = GpsLog.objects.filter( zekken_number=entry.zekken_number, event_code=entry.event.event_name ).aggregate(max_serial=Max('serial_number'))['max_serial'] or 0 - # チェックポイント登録 + # チェックポイント登録(S3 URLを使用) checkpoint = GpsLog.objects.create( serial_number=max_serial + 1, zekken_number=entry.zekken_number, event_code=entry.event.event_name, cp_number=cp_number, - image_address=image_url, + image_address=s3_image_url, # S3 URLを保存 checkin_time=timezone.now(), create_at=timezone.now(), update_at=timezone.now(), @@ -638,7 +654,8 @@ def checkin_from_rogapp(request): "bonus_points": bonus_points, "scoring_breakdown": scoring_breakdown, "validation_status": "pending", - "requires_manual_review": bool(gps_coordinates.get('accuracy', 0) > 10) # 10m以上は要審査 + "requires_manual_review": bool(gps_coordinates.get('accuracy', 0) > 10), # 10m以上は要審査 + "image_url": s3_image_url # S3画像URLを返す }) except Exception as e: