# coding: utf-8
# 1. Load libraries
import os, json
import imghdr
import urllib.parse
import urllib.request
import urllib.error
import hashlib
import io
from PIL import Image
# Load the binding settings (function.json)
with open(os.path.dirname(__file__) + "/function.json", "r") as f:
    bindings = json.load(f)
bindings = {bind["name"]: bind for bind in bindings["bindings"]}
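# Note: function.json holds this function's Azure Functions binding definitions.
# Re-keying the "bindings" array by each binding's "name" lets the handler look up
# a binding by name later (e.g. the "outputBlob" blob path template).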
# Face API key information
face_api_key = "{API Key}"
face_api_endpoint = "https://japaneast.api.cognitive.microsoft.com/face/v1.0"
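# Replace "{API Key}" with your own Face API subscription key; the endpoint above
# assumes a resource in the Japan East region, so adjust it to match your resource.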
# Send a detection request to the Face API
def detect_face_from_data(api_key, body, endpoint_or_location="japaneast", returnFaceId=True, returnFaceLandmarks=False, returnFaceAttributes=[]):
    # Build the query string (urlencode returns a string, so add optional
    # parameters to the dict before encoding)
    query = {
        "returnFaceId": returnFaceId,
        "returnFaceLandmarks": returnFaceLandmarks,
    }
    if returnFaceAttributes != []:
        query["returnFaceAttributes"] = ",".join(returnFaceAttributes)
    params = urllib.parse.urlencode(query)
    # Accept either a full endpoint URL or a region name
    if "http" in endpoint_or_location:
        url = "{}/detect?{}".format(endpoint_or_location, params)
    else:
        url = "https://{}.api.cognitive.microsoft.com/face/v1.0/detect?{}".format(endpoint_or_location, params)
    # Request headers
    headers = {
        "Ocp-Apim-Subscription-Key": api_key,
        "Content-Type": "application/octet-stream"
    }
    # Send the HTTP request
    req = urllib.request.Request(url, data=body, headers=headers)
    try:
        with urllib.request.urlopen(req) as res:
            res_json = json.loads(res.read())
    except urllib.error.HTTPError as err:
        print(err.code)
        print(err.read())
        res_json = []
    except urllib.error.URLError as err:
        print(err.reason)
        res_json = []
    return res_json
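# For reference, a successful detect call returns a JSON array roughly of the form
# (the values below are illustrative only):
#   [{"faceId": "...", "faceRectangle": {"top": 10, "left": 20, "width": 80, "height": 80}}]
# The handler below relies on the "faceRectangle" fields of each entry.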
print("Function Start. ID:" + os.environ["INVOCATIONID"])
def handler():
    # Load the input queue message
    with open(os.environ["inputMessage"], "r") as f:
        input_msg = json.load(f)
    # Download the input content
    print("Downloading input image")
    dl_url = input_msg["img_url"]
    req = urllib.request.Request(dl_url)
    with urllib.request.urlopen(req) as res:
        img_bytes = res.read()
    # Gather values for the execution history
    output_path = bindings["outputBlob"]["path"].format(blob_path=input_msg["blob_path"]).split("/")
    container = output_path[0]
    blob_path = "/".join(output_path[1:])
    img_name = output_path[-1]
    img_md5 = hashlib.md5(img_bytes).hexdigest()
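    # The blob path template's first segment is the container name and the rest is
    # the path within it; e.g. a hypothetical "out-container/{blob_path}" resolves to
    # container "out-container" and blob_path equal to the formatted remainder.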
    # Determine the image format
    print("Checking input image format")
    img_format = imghdr.what(None, h=img_bytes)
    if img_format in ["jpeg", "png", "bmp", "gif"]:
        # Detect faces
        print("Detecting Faces")
        detected_faces = detect_face_from_data(face_api_key, img_bytes, endpoint_or_location=face_api_endpoint)
        if len(detected_faces):
            result_message = "%s face(s) detected." % len(detected_faces)
        else:
            result_message = "No faces detected."
        print(result_message)
        # 2. Convert the downloaded image data into a Pillow image
        img_pil = Image.open(io.BytesIO(img_bytes))
        img_raw_face = Image.open(os.environ["inputBlob"])  # overlay image provided via the inputBlob binding
        for face_info in detected_faces:
            # 3. Get the position and size of the detected face
            face_rect = face_info["faceRectangle"]
            # 4. Resize the overlay image to fit the face rectangle
            # (Image.LANCZOS is the same filter as the removed Image.ANTIALIAS alias)
            paste_face = img_raw_face.copy()
            paste_face.thumbnail((face_rect["width"], face_rect["height"]), Image.LANCZOS)
            # 5. Paste the resized image over the detected face
            img_pil.paste(paste_face, (face_rect["left"], face_rect["top"]))
        # 6. Save the output image
        img_pil.save(os.environ["outputBlob"], img_format.upper(), quality=100)
        #with open(os.environ["outputBlob"], "wb") as fw:  # replaced
        #    fw.write(img_bytes)
        # Build the output message
        result = {
            "status": "ok",
            #"message": "Image is uploaded.",
            "message": result_message,
            "container": container,
            "blob_path": blob_path,
            "filename": img_name,
            "content_md5": img_md5,
            "input": input_msg
        }
    else:
        # Build the output message
        result = {
            "status": "error",
            "filename": img_name,
            "content_md5": img_md5,
            "input": input_msg,
        }
        if img_format is None:
            result["message"] = "Input file is not an image."
        else:
            result["message"] = "Unsupported image format."
    # Write the output queue message
    print("Output Queue Message")
    with open(os.environ["outputQueueItem"], "w") as fw:
        json.dump(result, fw)
    # Write the execution history to the table
    print("Insert Log to Table")
    insert_entity = {key: val for key, val in result.items()}
    insert_entity.update({
        "PartitionKey": "results",
        "RowKey": os.environ["INVOCATIONID"]
    })
    with open(os.environ["outputTable"], "w") as fw:
        json.dump(insert_entity, fw)
    return
if __name__ == "__main__":
    handler()
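# A minimal local test sketch (assumption: outside the Functions host, every binding
# environment variable must point at a readable/writable local file; the paths and
# message contents below are hypothetical):
#
#   INVOCATIONID=local-test \
#   inputMessage=/tmp/input_message.json \
#   inputBlob=/tmp/overlay.png \
#   outputBlob=/tmp/output.png \
#   outputQueueItem=/tmp/queue_item.json \
#   outputTable=/tmp/table_entity.json \
#   python3 <this script>
#
# where /tmp/input_message.json contains {"img_url": "...", "blob_path": "..."}.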