youtube-content-strategist
YouTube Content Strategist
Create a data-driven 30-day content calendar by analyzing your channel + niche benchmarks.
Usage
/youtube-content-strategist @MyChannel --niche "productivity"
/youtube-content-strategist @MyChannel --niche "cooking recipes" --uploads-per-week 3
/youtube-content-strategist UCxxxxxxx --niche "fitness"
Instructions
When the user invokes this skill:
Step 1: Parse Arguments
Extract:
- Channel (required): @handle, URL, or channel ID
- --niche "keyword" (required): The niche/topic area
- --uploads-per-week N (optional): Target upload frequency (default: auto-detect from history)
Step 2: Get API Key
Check Claude memory for YouTube Data API v3 key. If not found, ask:
"I need a YouTube Data API v3 key. You can get one from the Google Cloud Console. Please paste your key."
Step 3: Write the Script
Write the following Python script to /tmp/_yt_content_strategist_XXXX.py (where XXXX is a random suffix, e.g. $(openssl rand -hex 4)):
#!/usr/bin/env python3
"""
YouTube Content Strategist
Analyzes channel performance and niche benchmarks to generate
a data-driven content strategy and 30-day calendar.
"""
import argparse
import json
import os
import re
import sys
from datetime import datetime, timezone, timedelta
from collections import Counter
try:
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
except ImportError:
print("ERROR: google-api-python-client not installed. Run: pip3 install google-api-python-client")
sys.exit(1)
def parse_duration(iso_duration: str) -> int:
    """Convert an ISO 8601 duration string (YouTube API format) to total seconds.

    Handles day components such as "P1DT2H" or "P1D" (e.g. archives of long
    live streams), which a "PT"-only pattern would silently report as 0.
    Returns 0 for None, empty, or unparseable input.
    """
    m = re.match(
        r"P(?:(\d+)D)?(?:T(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?)?",
        iso_duration or "",
    )
    if not m or not any(m.groups()):
        return 0
    days, hours, minutes, seconds = (int(g or 0) for g in m.groups())
    return days * 86400 + hours * 3600 + minutes * 60 + seconds
def format_number(n: int) -> str:
    """Render a count in compact human form: 1.2M, 3.4K, or the plain number."""
    for threshold, suffix in ((1_000_000, "M"), (1_000, "K")):
        if n >= threshold:
            return f"{n / threshold:.1f}{suffix}"
    return str(n)
def days_since(published_at: str) -> float:
    """Days elapsed since an RFC 3339 timestamp, floored at 0.1; 1 if unparseable."""
    try:
        published = datetime.fromisoformat(published_at.replace("Z", "+00:00"))
        elapsed = (datetime.now(timezone.utc) - published).total_seconds() / 86400
        return max(elapsed, 0.1)
    except Exception:
        # Bad or missing timestamp: fall back to a neutral one-day age.
        return 1
def _channel_record(item: dict) -> dict:
    """Normalize one channels().list API item into the summary dict used downstream."""
    stats = item["statistics"]
    return {
        "id": item["id"],
        "title": item["snippet"]["title"],
        "uploads_playlist": item["contentDetails"]["relatedPlaylists"]["uploads"],
        "subscribers": int(stats.get("subscriberCount", 0)),
        "total_views": int(stats.get("viewCount", 0)),
        "video_count": int(stats.get("videoCount", 0)),
    }


def _lookup_channel_by_id(youtube, channel_id: str):
    """Fetch a channel by UC... ID; return the normalized record or None."""
    resp = youtube.channels().list(
        part="snippet,statistics,contentDetails", id=channel_id
    ).execute()
    items = resp.get("items")
    return _channel_record(items[0]) if items else None


def resolve_channel(youtube, channel_input: str) -> dict:
    """Resolve an @handle, channel URL, or UC... ID to a channel summary dict.

    Resolution order: URL pattern extraction -> direct ID lookup ->
    forHandle lookup -> keyword search fallback. Returns None on failure.
    """
    raw = channel_input.strip().rstrip("/")
    # Pull the handle or ID out of a full YouTube URL if one was given.
    for pattern in [
        r"youtube\.com/@([\w.-]+)",
        r"youtube\.com/channel/(UC[\w-]{22})",
        r"youtube\.com/c/([\w.-]+)",
    ]:
        m = re.search(pattern, raw)
        if m:
            raw = m.group(1)
            break
    if raw.startswith("UC") and len(raw) == 24:
        return _lookup_channel_by_id(youtube, raw)
    handle = raw.lstrip("@")
    try:
        resp = youtube.channels().list(
            part="snippet,statistics,contentDetails", forHandle=handle
        ).execute()
        if resp.get("items"):
            return _channel_record(resp["items"][0])
    except HttpError:
        pass
    # Last resort: keyword search (costs 100 quota units) and take the top channel hit.
    try:
        s = youtube.search().list(
            part="snippet", q=handle, type="channel", maxResults=1
        ).execute()
        if s.get("items"):
            return _lookup_channel_by_id(youtube, s["items"][0]["snippet"]["channelId"])
    except HttpError:
        pass
    return None
def fetch_playlist_videos(youtube, playlist_id: str, max_items: int = 200) -> list:
    """Page through a playlist, collecting up to max_items {video_id, published_at} dicts."""
    collected = []
    page_token = None
    while len(collected) < max_items:
        try:
            resp = youtube.playlistItems().list(
                part="contentDetails,snippet",
                playlistId=playlist_id,
                maxResults=min(50, max_items - len(collected)),
                pageToken=page_token,
            ).execute()
        except HttpError as e:
            # Best-effort: keep whatever pages succeeded so far.
            print(f" Playlist error: {e}")
            break
        collected.extend(
            {
                "video_id": entry["contentDetails"]["videoId"],
                "published_at": entry["snippet"]["publishedAt"],
            }
            for entry in resp.get("items", [])
        )
        page_token = resp.get("nextPageToken")
        if not page_token:
            break
    return collected
def fetch_video_details(youtube, video_ids: list) -> list:
    """Look up snippet/statistics/contentDetails for video IDs, 50 per API call."""
    details = []
    for start in range(0, len(video_ids), 50):
        chunk = video_ids[start:start + 50]
        try:
            resp = youtube.videos().list(
                part="snippet,statistics,contentDetails",
                id=",".join(chunk),
            ).execute()
        except HttpError as e:
            # Skip the failed batch but keep fetching the rest.
            print(f" Video details error: {e}")
            continue
        details.extend(resp.get("items", []))
    return details
def fetch_playlists(youtube, channel_id: str) -> list:
    """Return all playlists on a channel as {id, title, video_count} dicts."""
    found = []
    page_token = None
    while True:
        try:
            resp = youtube.playlists().list(
                part="snippet,contentDetails",
                channelId=channel_id,
                maxResults=50,
                pageToken=page_token,
            ).execute()
        except HttpError:
            # Best-effort: return whatever pages succeeded.
            break
        found.extend(
            {
                "id": pl["id"],
                "title": pl["snippet"]["title"],
                "video_count": pl["contentDetails"]["itemCount"],
            }
            for pl in resp.get("items", [])
        )
        page_token = resp.get("nextPageToken")
        if not page_token:
            break
    return found
def classify_content(title: str, tags: list, duration_sec: int) -> dict:
    """Bucket a video into a content type (keyword heuristics) and a duration band.

    Anything at or under 60s is classified "short" regardless of title/tags;
    otherwise the first matching keyword pattern wins, in priority order.
    """
    haystack = f"{title.lower()} {' '.join(t.lower() for t in tags)}"
    type_rules = [
        ("tutorial", r'\b(tutorial|how to|guide|learn|step by step)\b'),
        ("review", r'\b(review|unbox|first look|hands on|comparison)\b'),
        ("vlog", r'\b(vlog|day in|daily|life|routine)\b'),
        ("tips", r'\b(tips|tricks|hack|mistakes|advice)\b'),
        ("interview", r'\b(interview|podcast|conversation|talk|chat with|feat)\b'),
        ("news", r'\b(news|update|announcement|breaking|latest)\b'),
        ("challenge", r'\b(challenge|experiment|test|try|attempt)\b'),
        ("listicle", r'\b(top \d|best \d|\d+ best|\d+ ways|\d+ things)\b'),
        ("storytelling", r'\b(story|experience|journey|honest|real|truth)\b'),
    ]
    if duration_sec <= 60:
        content_type = "short"
    else:
        content_type = next(
            (label for label, pattern in type_rules if re.search(pattern, haystack)),
            "other",
        )
    # Duration bands, longest first; <= 60s falls through to the "short" default.
    if duration_sec > 1800:
        duration_bucket = "long (30+ min)"
    elif duration_sec > 900:
        duration_bucket = "standard (15-30 min)"
    elif duration_sec > 300:
        duration_bucket = "medium (5-15 min)"
    elif duration_sec > 60:
        duration_bucket = "short-form (1-5 min)"
    else:
        duration_bucket = "short"
    return {"content_type": content_type, "duration_bucket": duration_bucket}
def main():
    """Run the full pipeline: resolve the channel, pull its uploads, compute
    content-mix / schedule / sequel analytics, benchmark the niche, and write
    everything to strategy_data.json for the report step."""
    parser = argparse.ArgumentParser(description="YouTube content strategy generator")
    parser.add_argument("channel", help="Channel @handle, URL, or ID")
    parser.add_argument("--niche", required=True, help="Niche/topic keyword")
    # 0 means "auto-detect from upload history" further below.
    parser.add_argument("--uploads-per-week", type=int, default=0)
    parser.add_argument("--output-dir", default=None)
    args = parser.parse_args()
    api_key = os.environ.get("YT_API_KEY")
    if not api_key:
        print("ERROR: YT_API_KEY environment variable not set.")
        sys.exit(1)
    youtube = build("youtube", "v3", developerKey=api_key)
    quota_used = 0  # running estimate of YouTube API quota units consumed
    # --- Resolve channel ---
    print(f"Resolving channel: {args.channel}")
    channel = resolve_channel(youtube, args.channel)
    # NOTE(review): resolve_channel may fall back to search() (100 units), so
    # counting 1 here can undercount — confirm against actual quota usage.
    quota_used += 1
    if not channel:
        print("ERROR: Could not resolve channel.")
        sys.exit(1)
    print(f" Found: {channel['title']} ({format_number(channel['subscribers'])} subs)")
    # --- Fetch channel videos ---
    print("Fetching channel uploads (last 200)...")
    playlist_items = fetch_playlist_videos(youtube, channel["uploads_playlist"], 200)
    quota_used += (len(playlist_items) + 49) // 50  # one unit per 50-item page
    vid_ids = [v["video_id"] for v in playlist_items]
    print(f" Found {len(vid_ids)} videos")
    print("Fetching video details...")
    raw_videos = fetch_video_details(youtube, vid_ids)
    quota_used += (len(vid_ids) + 49) // 50
    print(f" Got {len(raw_videos)} videos")
    # --- Process channel videos ---
    # Flatten each API item into the metrics record all analyses below consume.
    channel_videos = []
    for v in raw_videos:
        views = int(v.get("statistics", {}).get("viewCount", 0))
        likes = int(v.get("statistics", {}).get("likeCount", 0))
        comments = int(v.get("statistics", {}).get("commentCount", 0))
        duration = parse_duration(v.get("contentDetails", {}).get("duration", ""))
        tags = v.get("snippet", {}).get("tags", [])
        title = v["snippet"]["title"]
        classification = classify_content(title, tags, duration)
        age = days_since(v["snippet"]["publishedAt"])
        channel_videos.append({
            "video_id": v["id"],
            "title": title,
            "views": views,
            "likes": likes,
            "comments": comments,
            "duration_sec": duration,
            "published_at": v["snippet"]["publishedAt"],
            "age_days": round(age, 1),
            "tags": tags,
            "content_type": classification["content_type"],
            "duration_bucket": classification["duration_bucket"],
            # (likes + comments) per 100 views; max() guards divide-by-zero
            "engagement_rate": round((likes + comments) / max(views, 1) * 100, 2),
            # views per day of age — a recency-adjusted popularity proxy
            "velocity": round(views / max(age, 0.1)),
        })
    channel_videos.sort(key=lambda x: x["published_at"], reverse=True)  # newest first
    # --- Analyze content pillars ---
    # Per-content-type counts and average performance.
    type_counter = Counter(v["content_type"] for v in channel_videos)
    type_performance = {}
    for ct in type_counter:
        vids = [v for v in channel_videos if v["content_type"] == ct]
        type_performance[ct] = {
            "count": len(vids),
            "pct": round(len(vids) / len(channel_videos) * 100, 1),
            "avg_views": round(sum(v["views"] for v in vids) / len(vids)),
            "avg_engagement": round(sum(v["engagement_rate"] for v in vids) / len(vids), 2),
            "total_views": sum(v["views"] for v in vids),
        }
    # --- Shorts vs long-form analysis ---
    shorts = [v for v in channel_videos if v["content_type"] == "short"]
    longform = [v for v in channel_videos if v["content_type"] != "short"]
    shorts_analysis = {
        "count": len(shorts),
        "pct": round(len(shorts) / max(len(channel_videos), 1) * 100, 1),
        "avg_views": round(sum(v["views"] for v in shorts) / max(len(shorts), 1)),
        "avg_engagement": round(sum(v["engagement_rate"] for v in shorts) / max(len(shorts), 1), 2),
    }
    longform_analysis = {
        "count": len(longform),
        "pct": round(len(longform) / max(len(channel_videos), 1) * 100, 1),
        "avg_views": round(sum(v["views"] for v in longform) / max(len(longform), 1)),
        "avg_engagement": round(sum(v["engagement_rate"] for v in longform) / max(len(longform), 1), 2),
    }
    # --- Duration analysis ---
    # Average views per duration band (bands assigned by classify_content).
    duration_counter = Counter(v["duration_bucket"] for v in channel_videos)
    duration_performance = {}
    for db in duration_counter:
        vids = [v for v in channel_videos if v["duration_bucket"] == db]
        duration_performance[db] = {
            "count": len(vids),
            "avg_views": round(sum(v["views"] for v in vids) / len(vids)),
        }
    # --- Upload schedule analysis ---
    # Derive cadence from gaps between consecutive publish dates, plus the
    # historically preferred weekdays/hours.
    if len(channel_videos) >= 2:
        dates = sorted([datetime.fromisoformat(v["published_at"].replace("Z", "+00:00")) for v in channel_videos])
        gaps = [(dates[i+1] - dates[i]).total_seconds() / 86400 for i in range(len(dates)-1)]
        avg_gap = sum(gaps) / len(gaps) if gaps else 7
        uploads_per_week = round(7 / max(avg_gap, 0.1), 1)
        day_counter = Counter(d.strftime("%A") for d in dates)
        hour_counter = Counter(d.hour for d in dates)
    else:
        # Not enough history — fall back to a weekly default.
        avg_gap = 7
        uploads_per_week = 1
        day_counter = Counter()
        hour_counter = Counter()
    # --- Sequel opportunities ---
    # Videos older than 60 days that beat 2x the channel average are
    # candidates for follow-ups/sequels.
    avg_views = sum(v["views"] for v in channel_videos) / max(len(channel_videos), 1)
    sequel_candidates = [v for v in channel_videos if v["views"] > avg_views * 2 and v["age_days"] > 60]
    sequel_candidates.sort(key=lambda x: x["views"], reverse=True)
    # --- Fetch niche benchmark data ---
    print(f"\nFetching niche benchmark data for: {args.niche}")
    try:
        niche_resp = youtube.search().list(
            part="snippet", q=args.niche, type="video",
            order="viewCount", maxResults=50,
        ).execute()
        niche_ids = [item["id"]["videoId"] for item in niche_resp.get("items", [])]
        quota_used += 100  # search.list costs 100 quota units
    except HttpError:
        niche_ids = []
    niche_videos = []
    if niche_ids:
        niche_raw = fetch_video_details(youtube, niche_ids)
        quota_used += 1
        for v in niche_raw:
            views = int(v.get("statistics", {}).get("viewCount", 0))
            duration = parse_duration(v.get("contentDetails", {}).get("duration", ""))
            niche_videos.append({"views": views, "duration_sec": duration,
                                 "title": v["snippet"]["title"]})
    niche_benchmark = {
        "avg_views": round(sum(v["views"] for v in niche_videos) / max(len(niche_videos), 1)),
        "avg_duration": round(sum(v["duration_sec"] for v in niche_videos) / max(len(niche_videos), 1)),
        "sample_size": len(niche_videos),
    }
    # --- Fetch playlists ---
    print("Fetching playlists...")
    playlists = fetch_playlists(youtube, channel["id"])
    quota_used += 1
    # --- Build output ---
    # Explicit --uploads-per-week wins; otherwise use the detected cadence.
    target_uploads = args.uploads_per_week if args.uploads_per_week > 0 else round(uploads_per_week)
    output = {
        "channel": channel,
        "niche": args.niche,
        "analyzed_at": datetime.now(timezone.utc).isoformat(),
        "total_videos_analyzed": len(channel_videos),
        "content_pillars": type_performance,
        "shorts_vs_longform": {"shorts": shorts_analysis, "longform": longform_analysis},
        "duration_performance": duration_performance,
        "upload_schedule": {
            "current_uploads_per_week": uploads_per_week,
            "target_uploads_per_week": target_uploads,
            "avg_days_between_uploads": round(avg_gap, 1),
            "preferred_days": day_counter.most_common(3),
            "preferred_hours": hour_counter.most_common(3),
        },
        "sequel_opportunities": [
            {"title": v["title"], "video_id": v["video_id"], "views": v["views"],
             "age_days": v["age_days"], "content_type": v["content_type"]}
            for v in sequel_candidates[:10]
        ],
        "niche_benchmark": niche_benchmark,
        "playlists": playlists,
        "top_performing": [
            {"title": v["title"], "video_id": v["video_id"], "views": v["views"],
             "engagement_rate": v["engagement_rate"], "content_type": v["content_type"],
             "duration_sec": v["duration_sec"]}
            for v in sorted(channel_videos, key=lambda x: x["views"], reverse=True)[:10]
        ],
        "recent_videos": channel_videos[:20],
        "all_videos": channel_videos,
        "quota_used": {"total_estimated": quota_used},
    }
    # --- Save ---
    # Sanitize the channel title so it is safe as a directory-name component.
    safe_channel = re.sub(r'[^a-zA-Z0-9_-]', '_', channel["title"])[:100]
    date_str = datetime.now().strftime("%Y%m%d")
    output_dir = args.output_dir or f"yt_strategy_{safe_channel}_{date_str}"
    os.makedirs(output_dir, exist_ok=True)
    output_file = os.path.join(output_dir, "strategy_data.json")
    try:
        with open(output_file, "w", encoding="utf-8") as f:
            json.dump(output, f, indent=2, ensure_ascii=False)
    except (IOError, OSError) as e:
        print(f"ERROR: Could not write output file: {e}")
        sys.exit(1)
    print(f"\nData saved to: {output_file}")
    print(f"Videos analyzed: {len(channel_videos)}")
    print(f"Content types found: {len(type_counter)}")
    print(f"Current upload rate: {uploads_per_week}/week")
    print(f"Sequel opportunities: {len(sequel_candidates)}")
    print(f"Estimated quota used: ~{quota_used} units")


if __name__ == "__main__":
    main()
Step 4: Install Dependencies
pip3 install google-api-python-client
Step 5: Run the Script
YT_API_KEY=API_KEY python3 /tmp/_yt_content_strategist_XXXX.py "@CHANNEL" --niche "NICHE" [--uploads-per-week N]
Step 6: Clean Up
rm -f /tmp/_yt_content_strategist_XXXX.py
Step 7: Read the Data
Read the generated strategy_data.json file.
Step 8: Generate the Strategy Report
Write a report to the output directory as content_strategy_report.md:
# Content Strategy: [Channel Name]
*Niche: [Niche] | Analyzed [date] | [N] videos analyzed*
## Channel Position Assessment
Where does this channel stand? Subscribers, total views, video count.
How does it compare to niche benchmarks?
## Content Mix Analysis
### Current Mix
| Content Type | Count | % | Avg Views | Avg Engagement |
|-------------|-------|---|-----------|----------------|
What's working best? What's underperforming?
### Optimal Mix Recommendation
Based on performance data, recommend shifting the mix.
## Shorts vs Long-Form Strategy
| Metric | Shorts | Long-Form |
|--------|--------|-----------|
Which is performing better for this channel?
Recommendation on Shorts strategy.
## Optimal Video Duration
| Duration Bucket | Count | Avg Views |
|----------------|-------|-----------|
What duration sweet spot should this channel target?
## Upload Schedule
| Metric | Current | Recommended |
|--------|---------|-------------|
Best days and times based on historical data.
Upload frequency recommendation with reasoning.
## Content Pillars (Ranked by Impact)
For each content pillar:
- Performance metrics
- Strategic role (growth, engagement, authority, etc.)
- Recommendation (double down / maintain / reduce / try)
## Sequel & Follow-Up Opportunities
Videos that outperformed and deserve sequels.
| Original Video | Views | Age | Suggested Follow-Up |
|---------------|-------|-----|---------------------|
## Playlist Strategy
Current playlists and their sizes.
Recommendations for new playlists or series.
## 30-Day Content Calendar
Generate a concrete calendar:
| Week | Day | Video Title Idea | Type | Duration | Rationale |
|------|-----|------------------|------|----------|-----------|
| 1 | Mon | ... | tutorial | 12 min | Top-performing format |
| 1 | Thu | ... | tips | 8 min | High engagement topic |
...
Base every recommendation on actual data from the analysis.
## Growth Levers (Ranked by Impact)
1. **[Lever]** - Data backing - Expected impact
2. **[Lever]** - Data backing - Expected impact
...
## Quota Usage
| Operation | Units |
|-----------|-------|
Step 9: Report Completion
Tell the user:
- Output folder path
- Channel position summary
- Top content pillar finding
- Upload schedule recommendation
- 30-day calendar overview
- Quota consumed
More from nikhilbhansali/youtube-data-skills
youtube-thumbnails
Download top 10 thumbnails for videos, shorts, and live streams from any YouTube channel. Creates an Obsidian-compatible index with embedded thumbnails. Use when the user wants to download thumbnails, analyze thumbnail designs, or create a visual overview of a YouTube channel's content. Accepts @handle, channel URL, or channel ID.
youtube-topic-researcher
Research any YouTube topic or niche using YouTube Data API v3. Analyze top-performing videos, find content gaps, identify outlier videos, assess niche saturation, and generate data-driven video ideas. Use when users want to (1) Research a topic before making videos, (2) Find content gaps in a niche, (3) Validate whether a niche is worth entering, (4) Discover what's working for a keyword, (5) Find underserved subtopics, (6) Get video ideas backed by data. Requires user's YouTube Data API v3 key.
youtube-trending-scanner
Scan what's trending right now in any YouTube niche using YouTube Data API v3. Find velocity outliers, rising channels, breakout videos, and emerging topics. Use when users want to (1) See what's trending in their niche right now, (2) Find breakout videos getting disproportionate views, (3) Discover rising channels with unusual traction, (4) Catch trends before they peak, (5) Find outdated content to remake, (6) Identify first-mover opportunities. Requires user's YouTube Data API v3 key.
youtube-comment-miner
Mine YouTube comments for content ideas, audience questions, pain points, and monetization signals using YouTube Data API v3. Analyze comments from specific videos, top videos of a channel, or search results for a topic. Use when users want to (1) Find what their audience is asking for, (2) Mine content ideas from comments, (3) Discover audience pain points, (4) Find FAQ patterns in comments, (5) Detect monetization signals, (6) Understand audience language and sentiment. Requires user's YouTube Data API v3 key.
youtube-competitor-analyzer
Find and analyze YouTube competitor channels using YouTube Data API v3. Discover competitors through keyword search, category matching, content similarity, and related channel discovery. Compare metrics, content strategies, and market positioning. Use when users want to (1) Find competitors for their YouTube channel, (2) Analyze competitor performance metrics, (3) Compare their channel against competitors, (4) Identify content gaps and opportunities, (5) Benchmark against similar creators, (6) Generate competitive analysis reports. Requires user's YouTube Data API v3 key.
youtube-title-tag-optimizer
Optimize YouTube video titles, tags, and descriptions before publishing using YouTube Data API v3. Analyze top-ranking videos for a keyword to reverse-engineer winning title patterns, extract effective tags, and generate optimized title variations. Use when users want to (1) Optimize a video title before publishing, (2) Find the best tags for a video, (3) Analyze what title patterns work for a keyword, (4) Score an existing title against competitors, (5) Build an optimized tag set, (6) Get description SEO templates. Requires user's YouTube Data API v3 key.