- 帖子
- 3
- 精华
- 0
- 积分
- 21
- 阅读权限
- 10
- 注册时间
- 2017-8-4
- 最后登录
- 2017-9-19
|
这部分是我通过豆瓣的api获取数据后经过处理插入数据库,功能能正常实现,但因为有些原因,所以top250的某些电影可能豆瓣没有数据,所以插入数据库会出错,于是我用了try except来处理,但当遇到异常之后,程序就会终止,我在excrpt部分最后增加了continue后这会有如下报错,该怎么解决
File "E:\Work\PythonLearn1\hello\gettop250.py", line 34
continue
SyntaxError: 'continue' not properly in loop
------------------------------------------部分代码-------------------------------------------------
#coding=utf-8
import urllib
import json
import time
import regular_msg
import sql_connect
#http://api.douban.com/v2/movie/1291843黑客帝国
#http://api.douban.com/v2/movie/1292052肖申克的救赎
#http://api.douban.com/v2/movie/top250
db = sql_connect.db_connect()#建立数据库连接
dbl = db.cursor()
count_1 = 0
def analysis(json_data,id1):
author = json_data['author']#导演
alt_title = json_data['alt_title'].encode('utf8')#中文名
post_img = json_data['image'].encode('utf8')#海报
title = json_data['title'].encode('utf8')#英文名
summary = json_data['summary'].encode('utf8')#简介
attrs = json_data['attrs']#艺术信息包含上映,语言,主演,编剧一类
link = json_data['mobile_link'].encode('utf8')#豆瓣链接,手机版与pc版已经合并
tags = json_data['tags']#电影标签
author = regular_msg.author_handle(author)
attrs = regular_msg.attrs_handle(attrs)
tags = regular_msg.tags_handle(tags)
id1 = id1.encode('utf8')
sql = ('insert into movies_douban values("'+id1+'","'+author+'","'+alt_title+'","'+post_img+'","'+title+'","'+summary+'","'+attrs['language']+'","'+attrs['pubdate']+'","'+attrs['country']+'","'+attrs['writer']+'","'+attrs['director']+'","'+attrs['cast']+'","'+attrs['movie_duration']+'","'+attrs['year']+'","'+attrs['movie_type']+'","'+link+'","'+tags+'")')
try:
dbl.execute(sql)# 执行sql语句
db.commit()# 提交到数据库执行
print ('成功')
except:
db.rollback()# 如果错误就回滚
print ('失败')
continue
movie_ids = []
for index in range(0,250,50):
response = urllib.urlopen('http://api.douban.com/v2/movie/top250?start=%d&count=50'%index)
data = response.read()
data_json = json.loads(data)
movies250 = data_json['subjects']
for movie in movies250:
movie_ids.append(movie['id'])
print movie['id'],movie['title']
time.sleep(3)
print ('250id获取完成')
for mid in movie_ids:
response = urllib.urlopen('http://api.douban.com/v2/movie/%s'%mid)
data = response.read()
data_json = json.loads(data)#数据整理并插入数据库
analysis(data_json, mid)
count_1 +=1
time.sleep(1.5)
db.close()# 关闭数据库连接
print ('查询了%d条'%count_1)
|
|