Urban Planning Lecture Notes PDF (May 2026)

def _show_case_studies(self):
    print("\nšŸ“‹ CASE STUDIES:")
    for i, case in enumerate(self.analyzer.case_studies[:5], 1):
        print(f"\n{i}. {case['title']}")
        print(f"   {case['description'][:200]}...")
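This helper assumes each entry in case_studies is a dict with at least 'title' and 'description' keys. A minimal sketch of that assumed record shape (the field values are placeholders, not taken from the source):

# Hypothetical shape of one case-study record, inferred only from the
# keys _show_case_studies accesses above.
example_case = {
    'title': 'Example case study title',
    'description': 'Example description text extracted from the lecture notes...'
}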

def search_similar_content(self, query: str, top_k: int = 3) -> List[Dict]:
    """Search for content similar to the query using TF-IDF."""
    # Treat each page as a document and append the query as the last one
    documents = [page['text'] for page in self.pages_text]
    documents.append(query)

    # Create the TF-IDF matrix
    vectorizer = TfidfVectorizer(stop_words='english')
    tfidf_matrix = vectorizer.fit_transform(documents)

    # Cosine similarity between the query (last row) and every page
    cosine_similarities = cosine_similarity(tfidf_matrix[-1:], tfidf_matrix[:-1])

    # Indices of the top-k most similar pages, best first
    similar_indices = cosine_similarities.argsort()[0][-top_k:][::-1]

    results = []
    for idx in similar_indices:
        if cosine_similarities[0][idx] > 0:
            results.append({
                'page_number': self.pages_text[idx]['page_num'],
                'similarity_score': float(cosine_similarities[0][idx]),
                'excerpt': self.pages_text[idx]['text'][:500]
            })
    return results
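A short usage sketch, assuming an analyzer object whose pages_text has already been filled by extract_text_from_pdf (the query string and variable names are illustrative):

# Illustrative usage; assumes extract_text_from_pdf() has run first.
matches = analyzer.search_similar_content("transit-oriented development", top_k=3)
for match in matches:
    print(f"Page {match['page_number']} "
          f"(score {match['similarity_score']:.3f}): {match['excerpt'][:100]}...")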

def extract_key_concepts(self) -> List[Dict]:
    """Extract and rank key urban planning concepts."""
    stop_words = set(stopwords.words('english'))

    # Urban-planning-specific terminology
    planning_terms = [
        'zoning', 'land use', 'transportation', 'infrastructure',
        'sustainability', 'urban design', 'smart growth', 'new urbanism',
        'gentrification', 'affordable housing', 'public space',
        'transit-oriented development', 'mixed-use', 'walkability',
        'green infrastructure', 'climate resilience', 'urban renewal',
        'community engagement', 'comprehensive plan', 'subdivision',
        'environmental impact', 'historic preservation', 'urban sprawl',
        'density', 'parking', 'complete streets', 'placemaking'
    ]

    # Tokenize and drop stop words and non-alphabetic tokens
    words = word_tokenize(self.full_text.lower())
    words = [w for w in words if w.isalpha() and w not in stop_words]

    # Count how often each planning term appears in the full text
    concept_counts = Counter()
    for term in planning_terms:
        count = self.full_text.lower().count(term)
        if count > 0:
            concept_counts[term] = count

    # Attach up to two context sentences to each of the top 20 concepts
    sentences = sent_tokenize(self.full_text)
    concepts = []
    for concept, count in concept_counts.most_common(20):
        context_sentences = [s for s in sentences if concept.lower() in s.lower()]
        concepts.append({
            'term': concept,
            'frequency': count,
            'context': context_sentences[:2]
        })

    self.key_concepts = concepts
    return concepts
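A usage sketch for the concept extractor, again assuming an analyzer whose full_text is already populated:

# Illustrative usage: rank concepts, then print the five most frequent
# along with one context sentence each.
concepts = analyzer.extract_key_concepts()
for c in concepts[:5]:
    print(f"{c['term']}: {c['frequency']} occurrences")
    if c['context']:
        print(f"  e.g. {c['context'][0][:120]}...")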

import PyPDF2
import re
from typing import List, Dict, Tuple
import json
from collections import Counter
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import spacy

# Download required NLTK data
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')

# Load spaCy model (run: python -m spacy download en_core_web_sm)
nlp = spacy.load('en_core_web_sm')
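The methods in these notes read instance attributes such as pdf_path, pages_text, full_text, key_concepts, case_studies, and sections, so they presumably belong to a single analyzer class that is never shown. A minimal skeleton under that assumption (the class name UrbanPlanningAnalyzer and the attribute defaults are guesses):

class UrbanPlanningAnalyzer:
    """Assumed container class for the methods in these notes."""

    def __init__(self, pdf_path: str):
        self.pdf_path = pdf_path
        self.pages_text: List[Dict] = []    # filled by extract_text_from_pdf
        self.full_text: str = ""
        self.key_concepts: List[Dict] = []  # filled by extract_key_concepts
        self.case_studies: List[Dict] = []
        self.sections: Dict[str, str] = {}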

def create_summary(self) -> Dict:
    """Create a structured summary of the lecture notes."""
    summary = {
        'total_pages': len(self.pages_text),
        'total_words': len(self.full_text.split()),
        'key_topics': [c['term'] for c in self.key_concepts[:15]],
        'case_studies_count': len(self.case_studies),
        'main_sections': list(self.sections.keys())[:10],
        'core_principles': self._extract_principles(),
        'recommended_focus_areas': self._identify_focus_areas()
    }
    return summary
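create_summary calls two helpers, _extract_principles and _identify_focus_areas, that never appear in these notes. Minimal placeholder implementations so the method runs; these derive rough values from data already extracted and are assumptions, not the original logic:

# Placeholder helpers; the originals are not shown in these notes.
def _extract_principles(self) -> List[str]:
    # Assumption: treat the first context sentence of each top concept
    # as a stand-in for a core principle.
    return [c['context'][0] for c in self.key_concepts[:5] if c['context']]

def _identify_focus_areas(self) -> List[str]:
    # Assumption: focus areas are simply the most frequent concept terms.
    return [c['term'] for c in self.key_concepts[:5]]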

def extract_text_from_pdf(self) -> str:
    """Extract text from the PDF file."""
    text = ""
    with open(self.pdf_path, 'rb') as file:
        pdf_reader = PyPDF2.PdfReader(file)
        for page_num, page in enumerate(pdf_reader.pages):
            page_text = page.extract_text()
            self.pages_text.append({
                'page_num': page_num + 1,
                'text': page_text
            })
            text += page_text + "\n"
    self.full_text = text
    return text
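A usage sketch tying extraction together (the file name is hypothetical). Note that PyPDF2's extract_text can return an empty string for scanned, image-only pages:

# Illustrative usage; the path is hypothetical.
analyzer = UrbanPlanningAnalyzer("urban_planning_lecture_notes.pdf")
text = analyzer.extract_text_from_pdf()
print(f"Extracted {len(analyzer.pages_text)} pages, {len(text.split())} words")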

def _show_summary(self):
    summary = self.analyzer.create_summary()
    print("\nšŸ“Š LECTURE SUMMARY:")
    print(f"   Pages: {summary['total_pages']}")
    print(f"   Total Words: {summary['total_words']:,}")
    print(f"   Case Studies: {summary['case_studies_count']}")
    print(f"\n   Main Topics: {', '.join(summary['key_topics'][:10])}")
    print(f"\n   Key Sections: {', '.join(summary['main_sections'][:5])}")
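_show_summary and _show_case_studies read self.analyzer rather than self, which suggests they are methods of a small interactive wrapper around the analyzer. A sketch of that wrapper under that assumption, with the two _show_* methods above attached to it (the class name and menu loop are illustrative):

class LectureNotesExplorer:
    """Hypothetical interactive wrapper inferred from the self.analyzer usage."""

    def __init__(self, analyzer: UrbanPlanningAnalyzer):
        self.analyzer = analyzer

    def run(self):
        # Minimal menu loop mirroring the _show_* helpers above.
        while True:
            choice = input("\n[1] Summary  [2] Case studies  [q] Quit: ").strip()
            if choice == '1':
                self._show_summary()
            elif choice == '2':
                self._show_case_studies()
            elif choice.lower() == 'q':
                break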
