# 2. main function def crawler(maxpage, query, s_date, e_date, press): press = int(press_name) s_from = s_date.replace(".", "") e_to = e_date.replace(".", "") page =1 maxpage_t = (int(maxpage)-1)*10+1 f = open("D:/10.MyPython_work/nlp/web_crawl/contents_text.csv", 'w', encoding = 'utf-8') wr = csv.writer(f) wr.writerow(['years', 'company', 'title', 'contents', 'link']) while page < maxpage_t: url = 'https://search.naver.com/search.naver?where=news&query=' + query + '&sort=0&ds=' + s_date + '&de=' + e_date + '&news_office_checked='+ press + '&nso=so%3Ar%2Cp%3Afrom' + s_from + 'to' + e_to + '%2Ca%3A&start=' + str(page) # ua = UserAgent() # headers = {'User-Agent' : ua.random} req = requests.get(url) cont = req.content soup = BeautifulSoup(cont, 'html.parser') for urls in soup.select("a.info"): try: if urls["href"].startswith("https://news.naver.com"): news_detail = [] ua = UserAgent() headers = {"User-Agent" : ua.random} breq = requests.get(urls["href"], headers = headers) bsoup = BeautifulSoup(breq.content, 'html.parser') title = bsoup.select('h3#articleTitle')[0].text news_detail.append(title) pdate = bsoup.select('.t11')[0].get_text()[:11] news_detail.append(pdate) _text = bsoup.select('#articleBodyContents')[0].get_text().replace('\n', " ") btext = _text.replace("// flash 오류를 우회하기 위한 함수 추가 function _flash_removeCallback() {}", "") news_detail.append(btext.strip()) news_detail.append(urls["href"]) pcompany = bsoup.select('#footer address')[0].a.get_text() news_detail.append(pcompany) wr.writerow([news_detail[1].replace(',',''), news_detail[4].replace(',',''), news_detail[0].replace(',',''), news_detail[2].replace(',',''), news_detail[3].replace(',','')]) except Exception as e: continue page += 10 print('Completed!') f.close() def main(): maxpage = input("검색 할 페이지수: ") query = input("검색어: ") s_date = input("시작 날짜(YYYY.MM.DD): ") e_date = input("종료 날짜(YYYY.MM.DD): ") medium = {'경향신문': '1032', '국민일보': '1005', '동아일보': '1020', '문화일보': '1021', '중앙일보': '1025', '한겨레': '1028', '한국경제': '1015', 'KBS': '1056', 'MBC': '1214'} press_name = medium.get(input("언론사 :")) crawler(maxpage, query, s_date, e_date, press) main()
강의와 구글 검색으로 네이버 뉴스를 신문사 선택하여 스크레핑할 수 있게 만들려고 작성한것입니다. 그런데
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_14200/631169102.py in <module>
70 crawler(maxpage, query, s_date, e_date, press)
71
---> 72 main()
~\AppData\Local\Temp/ipykernel_14200/631169102.py in main()
68 press_name = medium.get(input("언론사 :"))
69
---> 70 crawler(maxpage, query, s_date, e_date, press)
71
72 main()
NameError: name 'press' is not defined
이런 에러메세지가 나오는데요.. 이건 어떻게 해결할 수 있을가요?
네. 답변 감사합니다.
추가적으로 main function def crawler 함수에 press 변수를 넣었는데. def main()에서 press 변수가 정의되지 않아서 일까요? 정의되지 않았다는 것이 어디에서 정의되지 않았다는 것일까요?